From 5abfa4b14e068e01c58477492cdf58e165e7b053 Mon Sep 17 00:00:00 2001 From: jianli-97 Date: Tue, 10 Oct 2023 13:47:24 +0800 Subject: [PATCH] modify upstream, tag v0.15.0, kola testcase for nestos --- Dockerfile | 2 +- build.sh | 6 +- go.work | 3 + mantle/README.md | 13 +- mantle/auth/azure.go | 63 +- mantle/auth/google.go | 130 +- mantle/cmd/kola/devshell.go | 74 +- mantle/cmd/kola/kola.go | 104 +- mantle/cmd/kola/options.go | 50 +- mantle/cmd/kola/qemuexec.go | 6 +- mantle/cmd/kola/switchkernel.go | 6 +- mantle/cmd/kola/testiso.go | 194 +- mantle/cmd/kolet/kolet.go | 20 +- mantle/cmd/ore/aliyun/visibility.go | 2 +- mantle/cmd/ore/aws/upload.go | 13 +- mantle/cmd/ore/azure/create-image.go | 67 +- .../{create-image-arm.go => delete-blob.go} | 83 +- mantle/cmd/ore/azure/delete-image.go | 11 +- mantle/cmd/ore/azure/replicate-image.go | 109 - mantle/cmd/ore/azure/upload-blob-arm.go | 116 - mantle/cmd/ore/azure/upload-blob.go | 58 +- mantle/cmd/ore/esx/create-base.go | 23 +- mantle/cmd/ore/gcloud/gcloud.go | 2 +- mantle/cmd/ore/gcloud/index.go | 125 - mantle/cmd/ore/gcloud/promote-image.go | 10 +- mantle/cmd/ore/gcloud/sync.go | 108 - mantle/cmd/ore/ibmcloud/copy-object.go | 12 +- mantle/cmd/ore/openstack/create.go | 14 +- mantle/cmd/plume/cosa2stream.go | 2 +- mantle/cmd/plume/fcos.go | 60 - mantle/cmd/plume/fedora.go | 163 - mantle/cmd/plume/plume.go | 6 - mantle/cmd/plume/prerelease.go | 569 - mantle/cmd/plume/release.go | 144 +- mantle/cmd/plume/stream_mirror.go | 8 +- mantle/cmd/plume/types.go | 86 - mantle/fcos/metadata.go | 5 +- mantle/go.mod | 76 +- mantle/go.sum | 184 +- mantle/harness/harness.go | 49 +- mantle/harness/harness_test.go | 33 +- mantle/harness/reporters/json.go | 4 +- mantle/harness/reporters/reporter.go | 6 +- mantle/kola/cluster/cluster.go | 15 - mantle/kola/harness.go | 318 +- mantle/kola/register/register.go | 5 + mantle/kola/tests/coretest/core.go | 65 +- mantle/kola/tests/crio/crio.go | 25 +- mantle/kola/tests/docker/docker.go | 2 +- 
mantle/kola/tests/ignition/empty.go | 8 +- mantle/kola/tests/ignition/luks.go | 17 +- mantle/kola/tests/ignition/passwd.go | 10 +- mantle/kola/tests/ignition/resource.go | 71 +- mantle/kola/tests/ignition/security.go | 2 +- mantle/kola/tests/ignition/symlink.go | 4 +- mantle/kola/tests/ignition/systemd.go | 2 +- mantle/kola/tests/isula/isula.go | 11 +- mantle/kola/tests/misc/auth.go | 2 +- mantle/kola/tests/misc/boot-mirror.go | 122 +- mantle/kola/tests/misc/files.go | 92 +- mantle/kola/tests/misc/multipath.go | 84 +- mantle/kola/tests/misc/network.go | 23 +- mantle/kola/tests/misc/selinux.go | 1 - mantle/kola/tests/misc/users.go | 15 +- mantle/kola/tests/ostree/basic.go | 15 +- mantle/kola/tests/ostree/unlock.go | 38 +- mantle/kola/tests/podman/podman.go | 12 +- mantle/kola/tests/rhcos/upgrade.go | 32 +- mantle/kola/tests/rpmostree/deployments.go | 34 +- mantle/kola/tests/rpmostree/rebase.go | 9 +- mantle/kola/tests/upgrade/basic.go | 9 +- mantle/kola/tests/util/containers.go | 23 +- mantle/network/ssh.go | 26 +- mantle/network/ssh_test.go | 2 +- mantle/platform/api/aliyun/api.go | 7 +- mantle/platform/api/aws/ami.go | 90 - mantle/platform/api/aws/api.go | 10 +- mantle/platform/api/aws/ec2.go | 171 +- mantle/platform/api/aws/images.go | 14 +- mantle/platform/api/aws/network.go | 81 +- mantle/platform/api/azure/api.go | 53 +- mantle/platform/api/azure/image.go | 84 - mantle/platform/api/azure/instance.go | 26 +- mantle/platform/api/azure/options.go | 4 - mantle/platform/api/azure/replication.go | 103 - mantle/platform/api/azure/storage.go | 62 +- mantle/platform/api/azure/storage_mit.go | 45 +- mantle/platform/api/gcloud/api.go | 25 +- mantle/platform/api/gcloud/compute.go | 21 +- mantle/platform/api/gcloud/pending.go | 6 +- mantle/platform/api/ibmcloud/s3.go | 11 +- mantle/platform/api/openstack/api.go | 4 +- mantle/platform/api/packet/api.go | 10 +- mantle/platform/cluster.go | 43 +- mantle/platform/conf/conf.go | 44 +- mantle/platform/conf/conf_test.go | 2 +- 
mantle/platform/machine/aws/cluster.go | 2 +- mantle/platform/machine/aws/flight.go | 11 +- mantle/platform/machine/aws/machine.go | 1 + mantle/platform/machine/azure/cluster.go | 7 +- mantle/platform/machine/azure/flight.go | 29 +- mantle/platform/machine/gcloud/cluster.go | 2 +- mantle/platform/machine/qemuiso/cluster.go | 4 +- mantle/platform/machine/unprivqemu/cluster.go | 7 +- mantle/platform/metal.go | 84 +- mantle/platform/platform.go | 92 +- mantle/platform/qemu.go | 97 +- mantle/platform/util.go | 10 +- mantle/rhcos/metadata.go | 5 +- mantle/sdk/const.go | 25 - mantle/sdk/download.go | 298 - mantle/sdk/repo/manifest.go | 453 - mantle/sdk/repo/manifest_test.go | 95 - mantle/sdk/verify.go | 109 - mantle/storage/bucket.go | 388 - mantle/storage/bucket_test.go | 90 - mantle/storage/index/indexer.go | 156 - mantle/storage/index/job.go | 126 - mantle/storage/index/sync.go | 92 - mantle/storage/index/tree.go | 93 - mantle/storage/object.go | 81 - mantle/storage/object_test.go | 100 - mantle/storage/storage.go | 25 - mantle/storage/sync.go | 152 - mantle/system/anonfile_linux.go | 97 - mantle/system/anonfile_linux_test.go | 144 - mantle/system/copy.go | 10 - mantle/system/copy_test.go | 30 +- mantle/system/exec/exec.go | 11 + mantle/system/hostname.go | 39 - mantle/system/mount_linux.go | 194 - mantle/system/nproc.go | 84 +- mantle/system/ns/enter.go | 13 +- mantle/system/ns/exec.go | 36 +- mantle/system/symlink.go | 25 - mantle/system/user/user.go | 74 - mantle/system/user/user_test.go | 94 - mantle/util/common.go | 6 +- mantle/{sdk => util}/distros.go | 10 +- mantle/util/logio.go | 38 - mantle/{sdk => util}/repo.go | 14 +- mantle/util/xz.go | 62 - .../azure-sdk-for-go/management/README.md | 19 - .../azure-sdk-for-go/management/client.go | 138 - .../azure-sdk-for-go/management/errors.go | 36 - .../Azure/azure-sdk-for-go/management/http.go | 190 - .../management/location/client.go | 30 - .../management/location/entities.go | 37 - 
.../azure-sdk-for-go/management/operations.go | 92 - .../management/publishSettings.go | 108 - .../management/storageservice/client.go | 108 - .../management/storageservice/entities.go | 79 - .../Azure/azure-sdk-for-go/management/util.go | 11 - .../github.com/IBM-Cloud/bluemix-go/go.mod | 12 - .../github.com/IBM-Cloud/bluemix-go/go.sum | 186 - .../vendor/github.com/clarketm/json/decode.go | 120 +- .../vendor/github.com/clarketm/json/encode.go | 114 +- .../vendor/github.com/clarketm/json/fuzz.go | 1 + .../github.com/clarketm/json/scanner.go | 31 +- .../coreos/butane/base/util/file.go | 14 +- .../github.com/coreos/butane/base/util/url.go | 23 +- .../coreos/butane/base/v0_2/translate.go | 37 +- .../coreos/butane/base/v0_3/translate.go | 37 +- .../coreos/butane/base/v0_4/translate.go | 36 +- .../coreos/butane/base/v0_5_exp/translate.go | 36 +- .../coreos/butane/config/common/errors.go | 8 + .../github.com/coreos/butane/config/config.go | 20 +- .../butane/config/fcos/v1_5_exp/schema.go | 10 + .../butane/config/fcos/v1_5_exp/translate.go | 74 +- .../butane/config/fcos/v1_5_exp/validate.go | 12 + .../butane/config/flatcar/v1_0/schema.go} | 18 +- .../butane/config/flatcar/v1_0/translate.go | 60 + .../butane/config/flatcar/v1_1_exp/schema.go} | 13 +- .../config/flatcar/v1_1_exp/translate.go | 60 + .../config/openshift/v4_10/translate.go | 7 + .../config/openshift/v4_11/result/schema.go} | 55 +- .../butane/config/openshift/v4_11/schema.go} | 41 +- .../{v4_11_exp => v4_11}/translate.go | 42 +- .../{v4_11_exp => v4_11}/validate.go | 2 +- .../config/openshift/v4_12/result/schema.go} | 47 +- .../butane/config/openshift/v4_12/schema.go} | 44 +- .../config/openshift/v4_12/translate.go | 279 + .../config/openshift/v4_12/validate.go} | 51 +- .../{v4_11_exp => v4_13_exp}/result/schema.go | 0 .../{v4_11_exp => v4_13_exp}/schema.go | 2 +- .../config/openshift/v4_13_exp/translate.go | 321 + .../config/openshift/v4_13_exp/validate.go} | 44 +- .../github.com/coreos/butane/translate/set.go 
| 31 + .../cosa/cosa_reader.go | 252 - .../cosa/schema_doc.go | 869 -- .../coreos-assembler}/LICENSE | 8 +- .../pkg/builds}/build.go | 52 +- .../pkg/builds}/builds.go | 5 +- .../pkg/builds}/cosa_v1.go | 81 +- .../pkg/builds}/schema.go | 2 +- .../coreos-assembler/pkg/builds/schema_doc.go | 980 ++ .../github.com/coreos/go-json/decode.go | 51 +- .../github.com/coreos/go-json/encode.go | 72 +- .../vendor/github.com/coreos/go-json/fuzz.go | 1 + .../vendor/github.com/coreos/go-json/go.mod | 3 - .../coreos/go-systemd/v22/dbus/dbus.go | 66 +- .../coreos/go-systemd/v22/dbus/methods.go | 480 +- .../coreos/go-systemd/v22/journal/journal.go | 179 - .../go-systemd/v22/journal/journal_unix.go | 215 + .../v22/journal/journal_windows.go} | 24 +- .../coreos/go-systemd/v22/unit/deserialize.go | 98 +- .../coreos/go-systemd/v22/unit/escape.go | 21 +- .../go-systemd/v22/unit/section.go} | 36 +- .../coreos/go-systemd/v22/unit/serialize.go | 23 + .../v2/config/shared/errors/errors.go | 2 + .../v2/config/v3_0/types/directory.go | 1 + .../ignition/v2/config/v3_0/types/file.go | 1 + .../ignition/v2/config/v3_0/types/mode.go | 10 + .../v2/config/v3_1/types/directory.go | 1 + .../ignition/v2/config/v3_1/types/file.go | 1 + .../ignition/v2/config/v3_1/types/mode.go | 10 + .../v2/config/v3_2/types/directory.go | 1 + .../ignition/v2/config/v3_2/types/file.go | 1 + .../ignition/v2/config/v3_2/types/mode.go | 10 + .../v2/config/v3_3/types/directory.go | 1 + .../ignition/v2/config/v3_3/types/file.go | 1 + .../ignition/v2/config/v3_3/types/mode.go | 10 + .../v3_4_experimental/translate/translate.go | 28 + .../v2/config/v3_4_experimental/types/url.go | 26 + .../github.com/coreos/ioprogress/LICENSE | 21 - .../github.com/coreos/ioprogress/README.md | 42 - .../github.com/coreos/ioprogress/draw.go | 135 - .../github.com/coreos/ioprogress/reader.go | 107 - .../stream-metadata-go/fedoracoreos/fcos.go | 4 +- .../stream-metadata-go/release/release.go | 15 + .../stream-metadata-go/release/translate.go | 53 +- 
.../stream/artifact_utils.go | 15 +- .../stream-metadata-go/stream/stream.go | 34 +- .../stream-metadata-go/stream/stream_utils.go | 8 +- .../github.com/digitalocean/go-libvirt/go.mod | 5 - .../github.com/digitalocean/go-libvirt/go.sum | 14 - .../github.com/digitalocean/godo/go.mod | 9 - .../github.com/digitalocean/godo/go.sum | 23 - .../github.com/dimchansky/utfbom/go.mod | 1 - .../github.com/dustin/go-humanize/.travis.yml | 21 - .../github.com/dustin/go-humanize/LICENSE | 21 - .../dustin/go-humanize/README.markdown | 124 - .../github.com/dustin/go-humanize/big.go | 31 - .../github.com/dustin/go-humanize/bigbytes.go | 173 - .../github.com/dustin/go-humanize/bytes.go | 143 - .../github.com/dustin/go-humanize/comma.go | 116 - .../github.com/dustin/go-humanize/commaf.go | 40 - .../github.com/dustin/go-humanize/ftoa.go | 46 - .../github.com/dustin/go-humanize/humanize.go | 8 - .../github.com/dustin/go-humanize/number.go | 192 - .../github.com/dustin/go-humanize/ordinals.go | 25 - .../github.com/dustin/go-humanize/si.go | 123 - .../github.com/dustin/go-humanize/times.go | 117 - .../github.com/godbus/dbus/v5/.travis.yml | 50 - .../github.com/godbus/dbus/v5/README.markdown | 4 +- .../vendor/github.com/godbus/dbus/v5/auth.go | 2 +- .../vendor/github.com/godbus/dbus/v5/call.go | 9 + .../vendor/github.com/godbus/dbus/v5/conn.go | 159 +- .../vendor/github.com/godbus/dbus/v5/dbus.go | 4 + .../godbus/dbus/v5/default_handler.go | 22 +- .../github.com/godbus/dbus/v5/export.go | 61 +- .../vendor/github.com/godbus/dbus/v5/go.mod | 3 - .../vendor/github.com/godbus/dbus/v5/go.sum | 0 .../vendor/github.com/godbus/dbus/v5/match.go | 27 + .../github.com/godbus/dbus/v5/object.go | 65 +- .../github.com/godbus/dbus/v5/sequence.go | 24 + .../godbus/dbus/v5/sequential_handler.go | 125 + .../vendor/github.com/godbus/dbus/v5/sig.go | 2 +- .../dbus/v5/transport_unixcred_freebsd.go | 1 + .../github.com/godbus/dbus/v5/variant.go | 6 + mantle/vendor/github.com/google/uuid/go.mod | 1 - 
.../github.com/googleapis/gax-go/v2/go.mod | 3 - .../github.com/googleapis/gax-go/v2/go.sum | 25 - .../github.com/gophercloud/gophercloud/go.mod | 9 - .../github.com/gophercloud/gophercloud/go.sum | 18 - .../github.com/jmespath/go-jmespath/go.mod | 5 - .../github.com/jmespath/go-jmespath/go.sum | 11 - .../vendor/github.com/json-iterator/go/go.mod | 11 - .../vendor/github.com/json-iterator/go/go.sum | 14 - .../github.com/klauspost/cpuid/.gitignore | 24 - .../github.com/klauspost/cpuid/.travis.yml | 46 - .../klauspost/cpuid/CONTRIBUTING.txt | 35 - .../vendor/github.com/klauspost/cpuid/LICENSE | 22 - .../github.com/klauspost/cpuid/README.md | 191 - .../github.com/klauspost/cpuid/cpuid.go | 1504 -- .../github.com/klauspost/cpuid/cpuid_386.s | 42 - .../github.com/klauspost/cpuid/cpuid_amd64.s | 42 - .../github.com/klauspost/cpuid/cpuid_arm64.s | 26 - .../klauspost/cpuid/detect_arm64.go | 219 - .../klauspost/cpuid/detect_intel.go | 33 - .../github.com/klauspost/cpuid/detect_ref.go | 14 - .../vendor/github.com/klauspost/cpuid/go.mod | 3 - .../vendor/github.com/minio/md5-simd/LICENSE | 202 - .../github.com/minio/md5-simd/README.md | 196 - .../minio/md5-simd/block-generic.go | 132 - .../github.com/minio/md5-simd/block16_amd64.s | 227 - .../github.com/minio/md5-simd/block8_amd64.s | 279 - .../github.com/minio/md5-simd/block_amd64.go | 199 - .../vendor/github.com/minio/md5-simd/go.mod | 7 - .../vendor/github.com/minio/md5-simd/go.sum | 2 - .../minio/md5-simd/md5-digest_amd64.go | 178 - .../minio/md5-simd/md5-server_amd64.go | 307 - .../minio/md5-simd/md5-server_fallback.go | 12 - .../minio/md5-simd/md5-util_amd64.go | 70 - .../vendor/github.com/minio/md5-simd/md5.go | 57 - .../github.com/minio/minio-go/v7/.gitignore | 4 - .../minio/minio-go/v7/.golangci.yml | 16 - .../vendor/github.com/minio/minio-go/v7/CNAME | 1 - .../minio/minio-go/v7/CONTRIBUTING.md | 23 - .../github.com/minio/minio-go/v7/LICENSE | 202 - .../minio/minio-go/v7/MAINTAINERS.md | 35 - 
.../github.com/minio/minio-go/v7/Makefile | 35 - .../github.com/minio/minio-go/v7/NOTICE | 9 - .../github.com/minio/minio-go/v7/README.md | 251 - .../minio/minio-go/v7/README_zh_CN.md | 260 - .../minio-go/v7/api-bucket-encryption.go | 134 - .../minio/minio-go/v7/api-bucket-lifecycle.go | 147 - .../minio-go/v7/api-bucket-notification.go | 255 - .../minio/minio-go/v7/api-bucket-policy.go | 142 - .../minio-go/v7/api-bucket-replication.go | 228 - .../minio/minio-go/v7/api-bucket-tagging.go | 135 - .../minio-go/v7/api-bucket-versioning.go | 137 - .../minio/minio-go/v7/api-compose-object.go | 580 - .../minio/minio-go/v7/api-copy-object.go | 77 - .../minio/minio-go/v7/api-datatypes.go | 173 - .../minio/minio-go/v7/api-error-response.go | 271 - .../minio/minio-go/v7/api-get-object-acl.go | 140 - .../minio/minio-go/v7/api-get-object-file.go | 127 - .../minio/minio-go/v7/api-get-object.go | 681 - .../minio/minio-go/v7/api-get-options.go | 140 - .../github.com/minio/minio-go/v7/api-list.go | 965 -- .../minio-go/v7/api-object-legal-hold.go | 176 - .../minio/minio-go/v7/api-object-lock.go | 241 - .../minio/minio-go/v7/api-object-retention.go | 165 - .../minio/minio-go/v7/api-object-tagging.go | 157 - .../minio/minio-go/v7/api-presigned.go | 216 - .../minio/minio-go/v7/api-put-bucket.go | 123 - .../minio-go/v7/api-put-object-common.go | 148 - .../v7/api-put-object-file-context.go | 64 - .../minio-go/v7/api-put-object-multipart.go | 394 - .../minio-go/v7/api-put-object-streaming.go | 487 - .../minio/minio-go/v7/api-put-object.go | 370 - .../minio/minio-go/v7/api-remove.go | 419 - .../minio/minio-go/v7/api-s3-datatypes.go | 361 - .../minio/minio-go/v7/api-select.go | 751 - .../github.com/minio/minio-go/v7/api-stat.go | 127 - .../github.com/minio/minio-go/v7/api.go | 896 -- .../minio/minio-go/v7/bucket-cache.go | 253 - .../minio/minio-go/v7/code_of_conduct.md | 80 - .../github.com/minio/minio-go/v7/constants.go | 92 - .../github.com/minio/minio-go/v7/core.go | 133 - 
.../minio/minio-go/v7/functional_tests.go | 11812 ---------------- .../github.com/minio/minio-go/v7/go.mod | 27 - .../github.com/minio/minio-go/v7/go.sum | 77 - .../minio/minio-go/v7/hook-reader.go | 85 - .../v7/pkg/credentials/assume_role.go | 214 - .../minio-go/v7/pkg/credentials/chain.go | 89 - .../v7/pkg/credentials/config.json.sample | 17 - .../v7/pkg/credentials/credentials.go | 182 - .../v7/pkg/credentials/credentials.sample | 12 - .../minio/minio-go/v7/pkg/credentials/doc.go | 62 - .../minio-go/v7/pkg/credentials/env_aws.go | 71 - .../minio-go/v7/pkg/credentials/env_minio.go | 68 - .../pkg/credentials/file_aws_credentials.go | 120 - .../v7/pkg/credentials/file_minio_client.go | 135 - .../minio-go/v7/pkg/credentials/iam_aws.go | 367 - .../v7/pkg/credentials/signature-type.go | 77 - .../minio-go/v7/pkg/credentials/static.go | 67 - .../v7/pkg/credentials/sts_client_grants.go | 162 - .../v7/pkg/credentials/sts_ldap_identity.go | 124 - .../v7/pkg/credentials/sts_web_identity.go | 181 - .../minio-go/v7/pkg/encrypt/server-side.go | 198 - .../minio-go/v7/pkg/lifecycle/lifecycle.go | 303 - .../minio-go/v7/pkg/notification/info.go | 78 - .../v7/pkg/notification/notification.go | 395 - .../v7/pkg/replication/replication.go | 696 - .../minio/minio-go/v7/pkg/s3utils/utils.go | 391 - .../minio/minio-go/v7/pkg/set/stringset.go | 200 - .../pkg/signer/request-signature-streaming.go | 306 - .../v7/pkg/signer/request-signature-v2.go | 317 - .../v7/pkg/signer/request-signature-v4.go | 318 - .../minio/minio-go/v7/pkg/signer/utils.go | 59 - .../minio/minio-go/v7/pkg/sse/sse.go | 66 - .../minio/minio-go/v7/pkg/tags/tags.go | 341 - .../minio/minio-go/v7/post-policy.go | 327 - .../minio/minio-go/v7/retry-continous.go | 69 - .../github.com/minio/minio-go/v7/retry.go | 124 - .../minio/minio-go/v7/s3-endpoints.go | 57 - .../github.com/minio/minio-go/v7/s3-error.go | 61 - .../github.com/minio/minio-go/v7/transport.go | 83 - .../github.com/minio/minio-go/v7/utils.go | 488 - 
.../github.com/minio/sha256-simd/.gitignore | 1 - .../github.com/minio/sha256-simd/.travis.yml | 25 - .../github.com/minio/sha256-simd/README.md | 133 - .../github.com/minio/sha256-simd/appveyor.yml | 32 - .../github.com/minio/sha256-simd/cpuid.go | 119 - .../github.com/minio/sha256-simd/cpuid_386.go | 24 - .../github.com/minio/sha256-simd/cpuid_386.s | 53 - .../minio/sha256-simd/cpuid_amd64.go | 24 - .../minio/sha256-simd/cpuid_amd64.s | 53 - .../github.com/minio/sha256-simd/cpuid_arm.go | 32 - .../github.com/minio/sha256-simd/go.mod | 3 - .../github.com/minio/sha256-simd/sha256.go | 409 - .../sha256-simd/sha256blockAvx2_amd64.go | 22 - .../minio/sha256-simd/sha256blockAvx2_amd64.s | 1449 -- .../sha256-simd/sha256blockAvx512_amd64.asm | 686 - .../sha256-simd/sha256blockAvx512_amd64.go | 500 - .../sha256-simd/sha256blockAvx512_amd64.s | 267 - .../minio/sha256-simd/sha256blockAvx_amd64.go | 22 - .../minio/sha256-simd/sha256blockAvx_amd64.s | 408 - .../minio/sha256-simd/sha256blockSha_amd64.go | 6 - .../minio/sha256-simd/sha256blockSha_amd64.s | 266 - .../sha256-simd/sha256blockSsse_amd64.go | 22 - .../minio/sha256-simd/sha256blockSsse_amd64.s | 429 - .../minio/sha256-simd/sha256block_amd64.go | 53 - .../minio/sha256-simd/sha256block_arm64.go | 37 - .../minio/sha256-simd/sha256block_arm64.s | 192 - .../minio/sha256-simd/sha256block_other.go | 25 - .../minio/sha256-simd/test-architectures.sh | 15 - .../github.com/mitchellh/go-homedir/go.mod | 1 - mantle/vendor/github.com/pborman/uuid/go.mod | 3 - mantle/vendor/github.com/pborman/uuid/go.sum | 2 - mantle/vendor/github.com/rs/xid/.appveyor.yml | 27 - mantle/vendor/github.com/rs/xid/.travis.yml | 8 - mantle/vendor/github.com/rs/xid/LICENSE | 19 - mantle/vendor/github.com/rs/xid/README.md | 112 - mantle/vendor/github.com/rs/xid/go.mod | 1 - .../vendor/github.com/rs/xid/hostid_darwin.go | 9 - .../github.com/rs/xid/hostid_fallback.go | 9 - .../github.com/rs/xid/hostid_freebsd.go | 9 - 
.../vendor/github.com/rs/xid/hostid_linux.go | 10 - .../github.com/rs/xid/hostid_windows.go | 38 - mantle/vendor/github.com/rs/xid/id.go | 365 - .../github.com/sirupsen/logrus/README.md | 4 +- .../github.com/sirupsen/logrus/buffer_pool.go | 9 - .../github.com/sirupsen/logrus/entry.go | 21 +- .../vendor/github.com/sirupsen/logrus/go.mod | 10 - .../vendor/github.com/sirupsen/logrus/go.sum | 8 - .../github.com/sirupsen/logrus/logger.go | 13 + .../vendor/github.com/spf13/cobra/.gitignore | 2 +- .../github.com/spf13/cobra/.golangci.yml | 48 + .../vendor/github.com/spf13/cobra/.travis.yml | 28 - .../vendor/github.com/spf13/cobra/CONDUCT.md | 37 + .../github.com/spf13/cobra/CONTRIBUTING.md | 50 + .../vendor/github.com/spf13/cobra/MAINTAINERS | 13 + mantle/vendor/github.com/spf13/cobra/Makefile | 35 + .../vendor/github.com/spf13/cobra/README.md | 711 +- .../github.com/spf13/cobra/active_help.go | 49 + .../github.com/spf13/cobra/active_help.md | 157 + mantle/vendor/github.com/spf13/cobra/args.go | 22 +- .../spf13/cobra/bash_completions.go | 320 +- .../spf13/cobra/bash_completions.md | 181 +- .../spf13/cobra/bash_completionsV2.go | 369 + mantle/vendor/github.com/spf13/cobra/cobra.go | 15 + .../vendor/github.com/spf13/cobra/command.go | 241 +- .../github.com/spf13/cobra/command_notwin.go | 1 + .../github.com/spf13/cobra/command_win.go | 1 + .../github.com/spf13/cobra/completions.go | 832 ++ .../spf13/cobra/fish_completions.go | 220 + .../spf13/cobra/fish_completions.md | 4 + .../github.com/spf13/cobra/flag_groups.go | 223 + mantle/vendor/github.com/spf13/cobra/go.mod | 12 - mantle/vendor/github.com/spf13/cobra/go.sum | 149 - .../spf13/cobra/powershell_completions.go | 326 +- .../spf13/cobra/powershell_completions.md | 13 +- .../spf13/cobra/projects_using_cobra.md | 54 + .../spf13/cobra/shell_completions.go | 53 +- .../spf13/cobra/shell_completions.md | 548 + .../github.com/spf13/cobra/user_guide.md | 666 + .../github.com/spf13/cobra/zsh_completions.go | 549 +- 
.../github.com/spf13/cobra/zsh_completions.md | 87 +- mantle/vendor/github.com/spf13/pflag/go.mod | 3 - mantle/vendor/github.com/spf13/pflag/go.sum | 0 .../testify/assert/assertion_compare.go | 76 +- .../assert/assertion_compare_can_convert.go | 16 + .../assert/assertion_compare_legacy.go | 16 + .../testify/assert/assertion_format.go | 22 + .../testify/assert/assertion_forward.go | 44 + .../testify/assert/assertion_order.go | 8 +- .../stretchr/testify/assert/assertions.go | 190 +- .../vendor/github.com/ulikunitz/xz/.gitignore | 25 - mantle/vendor/github.com/ulikunitz/xz/LICENSE | 26 - .../vendor/github.com/ulikunitz/xz/README.md | 73 - .../github.com/ulikunitz/xz/SECURITY.md | 10 - mantle/vendor/github.com/ulikunitz/xz/TODO.md | 363 - mantle/vendor/github.com/ulikunitz/xz/bits.go | 79 - mantle/vendor/github.com/ulikunitz/xz/crc.go | 54 - .../vendor/github.com/ulikunitz/xz/format.go | 721 - .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 96 -> 0 bytes mantle/vendor/github.com/ulikunitz/xz/fox.xz | Bin 104 -> 0 bytes mantle/vendor/github.com/ulikunitz/xz/go.mod | 3 - .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 - .../ulikunitz/xz/internal/hash/doc.go | 14 - .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 - .../ulikunitz/xz/internal/hash/roller.go | 29 - .../ulikunitz/xz/internal/xlog/xlog.go | 457 - .../github.com/ulikunitz/xz/lzma/bintree.go | 522 - .../github.com/ulikunitz/xz/lzma/bitops.go | 47 - .../github.com/ulikunitz/xz/lzma/breader.go | 39 - .../github.com/ulikunitz/xz/lzma/buffer.go | 171 - .../ulikunitz/xz/lzma/bytewriter.go | 37 - .../github.com/ulikunitz/xz/lzma/decoder.go | 277 - .../ulikunitz/xz/lzma/decoderdict.go | 128 - .../ulikunitz/xz/lzma/directcodec.go | 38 - .../github.com/ulikunitz/xz/lzma/distcodec.go | 140 - .../github.com/ulikunitz/xz/lzma/encoder.go | 268 - .../ulikunitz/xz/lzma/encoderdict.go | 149 - .../github.com/ulikunitz/xz/lzma/fox.lzma | Bin 67 -> 0 bytes .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 - 
.../github.com/ulikunitz/xz/lzma/header.go | 167 - .../github.com/ulikunitz/xz/lzma/header2.go | 398 - .../ulikunitz/xz/lzma/lengthcodec.go | 116 - .../ulikunitz/xz/lzma/literalcodec.go | 125 - .../ulikunitz/xz/lzma/matchalgorithm.go | 52 - .../github.com/ulikunitz/xz/lzma/operation.go | 55 - .../github.com/ulikunitz/xz/lzma/prob.go | 53 - .../ulikunitz/xz/lzma/properties.go | 69 - .../ulikunitz/xz/lzma/rangecodec.go | 222 - .../github.com/ulikunitz/xz/lzma/reader.go | 100 - .../github.com/ulikunitz/xz/lzma/reader2.go | 231 - .../github.com/ulikunitz/xz/lzma/state.go | 145 - .../ulikunitz/xz/lzma/treecodecs.go | 133 - .../github.com/ulikunitz/xz/lzma/writer.go | 209 - .../github.com/ulikunitz/xz/lzma/writer2.go | 305 - .../github.com/ulikunitz/xz/lzmafilter.go | 117 - .../vendor/github.com/ulikunitz/xz/make-docs | 5 - .../github.com/ulikunitz/xz/none-check.go | 23 - .../vendor/github.com/ulikunitz/xz/reader.go | 359 - .../vendor/github.com/ulikunitz/xz/writer.go | 399 - .../github.com/xeipuuv/gojsonschema/go.mod | 7 - .../github.com/xeipuuv/gojsonschema/go.sum | 11 - mantle/vendor/go.opencensus.io/go.mod | 15 - mantle/vendor/go.opencensus.io/go.sum | 74 - .../golang.org/x/crypto/argon2/argon2.go | 285 - .../golang.org/x/crypto/argon2/blake2b.go | 53 - .../x/crypto/argon2/blamka_amd64.go | 61 - .../golang.org/x/crypto/argon2/blamka_amd64.s | 244 - .../x/crypto/argon2/blamka_generic.go | 163 - .../golang.org/x/crypto/argon2/blamka_ref.go | 16 - .../golang.org/x/crypto/blake2b/blake2b.go | 291 - .../x/crypto/blake2b/blake2bAVX2_amd64.go | 38 - .../x/crypto/blake2b/blake2bAVX2_amd64.s | 745 - .../x/crypto/blake2b/blake2b_amd64.go | 25 - .../x/crypto/blake2b/blake2b_amd64.s | 279 - .../x/crypto/blake2b/blake2b_generic.go | 182 - .../x/crypto/blake2b/blake2b_ref.go | 12 - .../golang.org/x/crypto/blake2b/blake2x.go | 177 - .../golang.org/x/crypto/blake2b/register.go | 33 - .../vendor/golang.org/x/crypto/cast5/cast5.go | 533 - .../golang.org/x/crypto/ed25519/ed25519.go | 
188 +- .../x/crypto/ed25519/ed25519_go113.go | 74 - .../ed25519/internal/edwards25519/const.go | 1422 -- .../internal/edwards25519/edwards25519.go | 1793 --- .../x/crypto/openpgp/armor/armor.go | 230 - .../x/crypto/openpgp/armor/encode.go | 160 - .../x/crypto/openpgp/canonical_text.go | 59 - .../x/crypto/openpgp/elgamal/elgamal.go | 130 - .../x/crypto/openpgp/errors/errors.go | 78 - .../golang.org/x/crypto/openpgp/keys.go | 693 - .../x/crypto/openpgp/packet/compressed.go | 123 - .../x/crypto/openpgp/packet/config.go | 91 - .../x/crypto/openpgp/packet/encrypted_key.go | 208 - .../x/crypto/openpgp/packet/literal.go | 89 - .../x/crypto/openpgp/packet/ocfb.go | 143 - .../openpgp/packet/one_pass_signature.go | 73 - .../x/crypto/openpgp/packet/opaque.go | 162 - .../x/crypto/openpgp/packet/packet.go | 590 - .../x/crypto/openpgp/packet/private_key.go | 385 - .../x/crypto/openpgp/packet/public_key.go | 753 - .../x/crypto/openpgp/packet/public_key_v3.go | 279 - .../x/crypto/openpgp/packet/reader.go | 76 - .../x/crypto/openpgp/packet/signature.go | 731 - .../x/crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 155 - .../openpgp/packet/symmetrically_encrypted.go | 290 - .../x/crypto/openpgp/packet/userattribute.go | 91 - .../x/crypto/openpgp/packet/userid.go | 160 - .../golang.org/x/crypto/openpgp/read.go | 448 - .../golang.org/x/crypto/openpgp/s2k/s2k.go | 279 - .../golang.org/x/crypto/openpgp/write.go | 418 - .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 - .../golang.org/x/crypto/pkcs12/crypto.go | 131 - .../golang.org/x/crypto/pkcs12/errors.go | 23 - .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 - .../vendor/golang.org/x/crypto/pkcs12/mac.go | 45 - .../golang.org/x/crypto/pkcs12/pbkdf.go | 170 - .../golang.org/x/crypto/pkcs12/pkcs12.go | 360 - .../golang.org/x/crypto/pkcs12/safebags.go | 57 - .../golang.org/x/crypto/ssh/agent/client.go | 24 +- .../golang.org/x/crypto/ssh/agent/keyring.go | 6 +- 
.../vendor/golang.org/x/crypto/ssh/certs.go | 85 +- .../vendor/golang.org/x/crypto/ssh/cipher.go | 8 + .../vendor/golang.org/x/crypto/ssh/client.go | 18 +- .../golang.org/x/crypto/ssh/client_auth.go | 132 +- .../vendor/golang.org/x/crypto/ssh/common.go | 76 +- .../golang.org/x/crypto/ssh/handshake.go | 81 +- mantle/vendor/golang.org/x/crypto/ssh/kex.go | 186 +- mantle/vendor/golang.org/x/crypto/ssh/keys.go | 147 +- .../golang.org/x/crypto/ssh/messages.go | 21 +- .../vendor/golang.org/x/crypto/ssh/server.go | 46 +- .../vendor/golang.org/x/crypto/ssh/session.go | 1 + .../x/crypto/ssh/terminal/terminal.go | 76 - .../golang.org/x/crypto/ssh/transport.go | 10 +- mantle/vendor/golang.org/x/net/AUTHORS | 3 + mantle/vendor/golang.org/x/net/CONTRIBUTORS | 3 + .../golang.org/x/net/context/context.go | 6 +- .../vendor/golang.org/x/net/context/go17.go | 10 +- .../golang.org/x/net/context/pre_go17.go | 10 +- .../golang.org/x/net/http/httpguts/httplex.go | 54 +- .../x/net/http2/client_conn_pool.go | 3 +- .../vendor/golang.org/x/net/http2/errors.go | 2 +- mantle/vendor/golang.org/x/net/http2/frame.go | 3 +- .../golang.org/x/net/http2/hpack/huffman.go | 87 +- mantle/vendor/golang.org/x/net/http2/http2.go | 14 +- .../vendor/golang.org/x/net/http2/server.go | 145 +- .../golang.org/x/net/http2/transport.go | 137 +- .../golang.org/x/net/http2/writesched.go | 4 +- .../x/net/http2/writesched_priority.go | 9 +- .../x/net/http2/writesched_random.go | 6 +- .../vendor/golang.org/x/net/idna/trieval.go | 34 +- .../golang.org/x/net/publicsuffix/list.go | 182 - .../golang.org/x/net/publicsuffix/table.go | 10585 -------------- .../golang.org/x/net/websocket/websocket.go | 5 +- mantle/vendor/golang.org/x/oauth2/go.mod | 9 - mantle/vendor/golang.org/x/oauth2/go.sum | 361 - .../vendor/golang.org/x/sys/cpu/cpu_arm64.go | 7 +- .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go | 65 - .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s | 11 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 4 +- 
mantle/vendor/golang.org/x/sys/unix/mkall.sh | 18 +- .../golang.org/x/sys/unix/syscall_aix.go | 57 +- .../golang.org/x/sys/unix/syscall_bsd.go | 2 +- .../golang.org/x/sys/unix/syscall_linux.go | 2 +- .../x/sys/unix/syscall_openbsd_libc.go | 27 - .../x/sys/unix/zerrors_linux_386.go | 4 +- .../x/sys/unix/zerrors_linux_amd64.go | 4 +- .../x/sys/unix/zerrors_linux_arm.go | 4 +- .../x/sys/unix/zerrors_linux_arm64.go | 4 +- .../x/sys/unix/zerrors_linux_loong64.go | 4 +- .../x/sys/unix/zerrors_linux_mips.go | 4 +- .../x/sys/unix/zerrors_linux_mips64.go | 4 +- .../x/sys/unix/zerrors_linux_mips64le.go | 4 +- .../x/sys/unix/zerrors_linux_mipsle.go | 4 +- .../x/sys/unix/zerrors_linux_ppc.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64.go | 4 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +- .../x/sys/unix/zerrors_linux_riscv64.go | 4 +- .../x/sys/unix/zerrors_linux_s390x.go | 4 +- .../x/sys/unix/zerrors_linux_sparc64.go | 4 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 2 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 2 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 2 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 2 +- .../x/sys/unix/zsyscall_openbsd_386.go | 798 +- .../x/sys/unix/zsyscall_openbsd_386.s | 796 -- .../x/sys/unix/zsyscall_openbsd_amd64.go | 798 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 796 -- .../x/sys/unix/zsyscall_openbsd_arm64.go | 798 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 796 -- .../x/sys/unix/zsysnum_linux_386.go | 2 +- .../x/sys/unix/zsysnum_linux_amd64.go | 2 +- .../x/sys/unix/zsysnum_linux_arm.go | 2 +- .../x/sys/unix/zsysnum_linux_arm64.go | 2 +- .../x/sys/unix/zsysnum_linux_loong64.go | 2 +- .../x/sys/unix/zsysnum_linux_mips.go | 2 +- .../x/sys/unix/zsysnum_linux_mips64.go | 2 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 2 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 2 
+- .../x/sys/unix/zsysnum_linux_s390x.go | 2 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 2 +- .../x/sys/unix/zsysnum_openbsd_386.go | 1 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 - .../golang.org/x/sys/unix/ztypes_linux.go | 8 - .../golang.org/x/sys/unix/ztypes_linux_386.go | 2 +- .../x/sys/unix/ztypes_linux_amd64.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2 +- .../x/sys/unix/ztypes_linux_arm64.go | 2 +- .../x/sys/unix/ztypes_linux_loong64.go | 2 +- .../x/sys/unix/ztypes_linux_mips.go | 2 +- .../x/sys/unix/ztypes_linux_mips64.go | 2 +- .../x/sys/unix/ztypes_linux_mips64le.go | 2 +- .../x/sys/unix/ztypes_linux_mipsle.go | 2 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64.go | 2 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 2 +- .../x/sys/unix/ztypes_linux_riscv64.go | 2 +- .../x/sys/unix/ztypes_linux_s390x.go | 2 +- .../x/sys/unix/ztypes_linux_sparc64.go | 2 +- .../x/sys/windows/syscall_windows.go | 95 - .../x/sys/windows/zsyscall_windows.go | 9 - mantle/vendor/golang.org/x/term/README.md | 2 - .../vendor/golang.org/x/term/codereview.cfg | 1 - mantle/vendor/golang.org/x/term/go.mod | 5 - mantle/vendor/golang.org/x/term/go.sum | 2 - mantle/vendor/golang.org/x/term/term.go | 6 +- .../vendor/golang.org/x/term/term_solaris.go | 111 + mantle/vendor/golang.org/x/term/term_unix.go | 3 +- .../{term_unix_other.go => term_unix_aix.go} | 5 +- .../vendor/golang.org/x/term/term_unix_bsd.go | 1 - .../not_go118.go => term/term_unix_linux.go} | 17 +- .../http2/go118.go => term/term_unix_zos.go} | 17 +- .../golang.org/x/term/term_unsupported.go | 1 - .../vendor/google.golang.org/appengine/go.mod | 9 - .../vendor/google.golang.org/appengine/go.sum | 11 - mantle/vendor/google.golang.org/grpc/go.mod | 15 - mantle/vendor/google.golang.org/grpc/go.sum | 68 - mantle/vendor/gopkg.in/yaml.v2/go.mod | 5 - mantle/vendor/gopkg.in/yaml.v3/decode.go | 78 +- 
mantle/vendor/gopkg.in/yaml.v3/go.mod | 5 - mantle/vendor/gopkg.in/yaml.v3/parserc.go | 11 +- mantle/vendor/modules.txt | 174 +- ...rpm-ostree-2022.16-3.oe2203sp2.aarch64.rpm | Bin 0 -> 3289873 bytes .../rpm-ostree-2022.16-3.oe2203sp2.x86_64.rpm | Bin 0 -> 3863601 bytes .../rpm-ostree-2022.8-3.oe2203.aarch64.rpm | Bin 2507701 -> 0 bytes .../rpm-ostree-2022.8-3.oe2203.x86_64.rpm | Bin 2971309 -> 0 bytes ...tree-devel-2022.16-3.oe2203sp2.aarch64.rpm | Bin 0 -> 28817 bytes ...stree-devel-2022.16-3.oe2203sp2.x86_64.rpm | Bin 0 -> 28837 bytes ...m-ostree-devel-2022.8-3.oe2203.aarch64.rpm | Bin 29841 -> 0 bytes ...pm-ostree-devel-2022.8-3.oe2203.x86_64.rpm | Bin 29853 -> 0 bytes src/cmd-buildextend-live | 10 +- src/deps.txt | 2 +- 726 files changed, 12019 insertions(+), 91945 deletions(-) create mode 100644 go.work rename mantle/cmd/ore/azure/{create-image-arm.go => delete-blob.go} (36%) delete mode 100644 mantle/cmd/ore/azure/replicate-image.go delete mode 100644 mantle/cmd/ore/azure/upload-blob-arm.go delete mode 100644 mantle/cmd/ore/gcloud/index.go delete mode 100644 mantle/cmd/ore/gcloud/sync.go delete mode 100644 mantle/cmd/plume/fcos.go delete mode 100644 mantle/cmd/plume/fedora.go delete mode 100644 mantle/cmd/plume/prerelease.go delete mode 100644 mantle/cmd/plume/types.go delete mode 100644 mantle/platform/api/aws/ami.go delete mode 100644 mantle/platform/api/azure/replication.go delete mode 100644 mantle/sdk/const.go delete mode 100644 mantle/sdk/download.go delete mode 100644 mantle/sdk/repo/manifest.go delete mode 100644 mantle/sdk/repo/manifest_test.go delete mode 100644 mantle/sdk/verify.go delete mode 100644 mantle/storage/bucket.go delete mode 100644 mantle/storage/bucket_test.go delete mode 100644 mantle/storage/index/indexer.go delete mode 100644 mantle/storage/index/job.go delete mode 100644 mantle/storage/index/sync.go delete mode 100644 mantle/storage/index/tree.go delete mode 100644 mantle/storage/object.go delete mode 100644 
mantle/storage/object_test.go delete mode 100644 mantle/storage/storage.go delete mode 100644 mantle/storage/sync.go delete mode 100644 mantle/system/anonfile_linux.go delete mode 100644 mantle/system/anonfile_linux_test.go delete mode 100644 mantle/system/hostname.go delete mode 100644 mantle/system/mount_linux.go delete mode 100644 mantle/system/symlink.go delete mode 100644 mantle/system/user/user.go delete mode 100644 mantle/system/user/user_test.go rename mantle/{sdk => util}/distros.go (88%) rename mantle/{sdk => util}/repo.go (90%) delete mode 100644 mantle/util/xz.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/README.md delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/client.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/errors.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/http.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/operations.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go delete mode 100644 mantle/vendor/github.com/Azure/azure-sdk-for-go/management/util.go delete mode 100644 mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.mod delete mode 100644 mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.sum rename mantle/{storage/error.go => vendor/github.com/coreos/butane/config/flatcar/v1_0/schema.go} (70%) create mode 100644 mantle/vendor/github.com/coreos/butane/config/flatcar/v1_0/translate.go rename mantle/{system/arch.go => 
vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/schema.go} (70%) create mode 100644 mantle/vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/translate.go rename mantle/{cmd/ore/azure/share-image.go => vendor/github.com/coreos/butane/config/openshift/v4_11/result/schema.go} (35%) rename mantle/{storage/index/set.go => vendor/github.com/coreos/butane/config/openshift/v4_11/schema.go} (45%) rename mantle/vendor/github.com/coreos/butane/config/openshift/{v4_11_exp => v4_11}/translate.go (89%) rename mantle/vendor/github.com/coreos/butane/config/openshift/{v4_11_exp => v4_11}/validate.go (98%) rename mantle/{cmd/ore/azure/unreplicate-image.go => vendor/github.com/coreos/butane/config/openshift/v4_12/result/schema.go} (35%) rename mantle/vendor/github.com/{minio/sha256-simd/cpuid_linux_arm64.go => coreos/butane/config/openshift/v4_12/schema.go} (45%) create mode 100644 mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/translate.go rename mantle/{util/bunzip.go => vendor/github.com/coreos/butane/config/openshift/v4_12/validate.go} (43%) rename mantle/vendor/github.com/coreos/butane/config/openshift/{v4_11_exp => v4_13_exp}/result/schema.go (100%) rename mantle/vendor/github.com/coreos/butane/config/openshift/{v4_11_exp => v4_13_exp}/schema.go (98%) create mode 100644 mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/translate.go rename mantle/{system/mount_linux_test.go => vendor/github.com/coreos/butane/config/openshift/v4_13_exp/validate.go} (43%) delete mode 100644 mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/cosa_reader.go delete mode 100644 mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/schema_doc.go rename mantle/vendor/github.com/{minio/sha256-simd => coreos/coreos-assembler}/LICENSE (99%) rename mantle/vendor/github.com/coreos/{coreos-assembler-schema/cosa => coreos-assembler/pkg/builds}/build.go (88%) rename mantle/vendor/github.com/coreos/{coreos-assembler-schema/cosa => 
coreos-assembler/pkg/builds}/builds.go (96%) rename mantle/vendor/github.com/coreos/{coreos-assembler-schema/cosa => coreos-assembler/pkg/builds}/cosa_v1.go (67%) rename mantle/vendor/github.com/coreos/{coreos-assembler-schema/cosa => coreos-assembler/pkg/builds}/schema.go (99%) create mode 100644 mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/schema_doc.go delete mode 100644 mantle/vendor/github.com/coreos/go-json/go.mod create mode 100644 mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go rename mantle/{sdk/version.go => vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go} (52%) rename mantle/vendor/github.com/{minio/sha256-simd/cpuid_other.go => coreos/go-systemd/v22/unit/section.go} (45%) delete mode 100644 mantle/vendor/github.com/coreos/ioprogress/LICENSE delete mode 100644 mantle/vendor/github.com/coreos/ioprogress/README.md delete mode 100644 mantle/vendor/github.com/coreos/ioprogress/draw.go delete mode 100644 mantle/vendor/github.com/coreos/ioprogress/reader.go delete mode 100644 mantle/vendor/github.com/digitalocean/go-libvirt/go.mod delete mode 100644 mantle/vendor/github.com/digitalocean/go-libvirt/go.sum delete mode 100644 mantle/vendor/github.com/digitalocean/godo/go.mod delete mode 100644 mantle/vendor/github.com/digitalocean/godo/go.sum delete mode 100644 mantle/vendor/github.com/dimchansky/utfbom/go.mod delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/.travis.yml delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/LICENSE delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/README.markdown delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/big.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/bigbytes.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/bytes.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/comma.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/commaf.go delete mode 100644 
mantle/vendor/github.com/dustin/go-humanize/ftoa.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/humanize.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/number.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/ordinals.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/si.go delete mode 100644 mantle/vendor/github.com/dustin/go-humanize/times.go delete mode 100644 mantle/vendor/github.com/godbus/dbus/v5/.travis.yml delete mode 100644 mantle/vendor/github.com/godbus/dbus/v5/go.mod delete mode 100644 mantle/vendor/github.com/godbus/dbus/v5/go.sum create mode 100644 mantle/vendor/github.com/godbus/dbus/v5/sequence.go create mode 100644 mantle/vendor/github.com/godbus/dbus/v5/sequential_handler.go delete mode 100644 mantle/vendor/github.com/google/uuid/go.mod delete mode 100644 mantle/vendor/github.com/googleapis/gax-go/v2/go.mod delete mode 100644 mantle/vendor/github.com/googleapis/gax-go/v2/go.sum delete mode 100644 mantle/vendor/github.com/gophercloud/gophercloud/go.mod delete mode 100644 mantle/vendor/github.com/gophercloud/gophercloud/go.sum delete mode 100644 mantle/vendor/github.com/jmespath/go-jmespath/go.mod delete mode 100644 mantle/vendor/github.com/jmespath/go-jmespath/go.sum delete mode 100644 mantle/vendor/github.com/json-iterator/go/go.mod delete mode 100644 mantle/vendor/github.com/json-iterator/go/go.sum delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/.gitignore delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/.travis.yml delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/LICENSE delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/README.md delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/cpuid.go delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/cpuid_386.s delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/cpuid_amd64.s delete mode 100644 
mantle/vendor/github.com/klauspost/cpuid/cpuid_arm64.s delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/detect_arm64.go delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/detect_intel.go delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/detect_ref.go delete mode 100644 mantle/vendor/github.com/klauspost/cpuid/go.mod delete mode 100644 mantle/vendor/github.com/minio/md5-simd/LICENSE delete mode 100644 mantle/vendor/github.com/minio/md5-simd/README.md delete mode 100644 mantle/vendor/github.com/minio/md5-simd/block-generic.go delete mode 100644 mantle/vendor/github.com/minio/md5-simd/block16_amd64.s delete mode 100644 mantle/vendor/github.com/minio/md5-simd/block8_amd64.s delete mode 100644 mantle/vendor/github.com/minio/md5-simd/block_amd64.go delete mode 100644 mantle/vendor/github.com/minio/md5-simd/go.mod delete mode 100644 mantle/vendor/github.com/minio/md5-simd/go.sum delete mode 100644 mantle/vendor/github.com/minio/md5-simd/md5-digest_amd64.go delete mode 100644 mantle/vendor/github.com/minio/md5-simd/md5-server_amd64.go delete mode 100644 mantle/vendor/github.com/minio/md5-simd/md5-server_fallback.go delete mode 100644 mantle/vendor/github.com/minio/md5-simd/md5-util_amd64.go delete mode 100644 mantle/vendor/github.com/minio/md5-simd/md5.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/.gitignore delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/.golangci.yml delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/CNAME delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/LICENSE delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/Makefile delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/NOTICE delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/README.md delete mode 100644 
mantle/vendor/github.com/minio/minio-go/v7/README_zh_CN.md delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-compose-object.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-copy-object.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-datatypes.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-error-response.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-get-object-file.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-get-object.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-get-options.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-list.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-object-lock.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-object-retention.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-object-tagging.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-presigned.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-put-bucket.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-put-object-common.go delete mode 100644 
mantle/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-put-object.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-remove.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-select.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api-stat.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/api.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/bucket-cache.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/code_of_conduct.md delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/constants.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/core.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/functional_tests.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/go.mod delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/go.sum delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/hook-reader.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go delete mode 100644 
mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go 
delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/post-policy.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/retry-continous.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/retry.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/s3-endpoints.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/s3-error.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/transport.go delete mode 100644 mantle/vendor/github.com/minio/minio-go/v7/utils.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/.gitignore delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/.travis.yml delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/README.md delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/appveyor.yml delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/cpuid.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/cpuid_386.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/cpuid_386.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/cpuid_arm.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/go.mod delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go delete mode 100644 
mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256block_amd64.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.s delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/sha256block_other.go delete mode 100644 mantle/vendor/github.com/minio/sha256-simd/test-architectures.sh delete mode 100644 mantle/vendor/github.com/mitchellh/go-homedir/go.mod delete mode 100644 mantle/vendor/github.com/pborman/uuid/go.mod delete mode 100644 mantle/vendor/github.com/pborman/uuid/go.sum delete mode 100644 mantle/vendor/github.com/rs/xid/.appveyor.yml delete mode 100644 mantle/vendor/github.com/rs/xid/.travis.yml delete mode 100644 mantle/vendor/github.com/rs/xid/LICENSE delete mode 100644 mantle/vendor/github.com/rs/xid/README.md delete mode 100644 mantle/vendor/github.com/rs/xid/go.mod delete mode 100644 mantle/vendor/github.com/rs/xid/hostid_darwin.go delete mode 100644 mantle/vendor/github.com/rs/xid/hostid_fallback.go delete mode 100644 mantle/vendor/github.com/rs/xid/hostid_freebsd.go delete mode 100644 mantle/vendor/github.com/rs/xid/hostid_linux.go delete mode 100644 mantle/vendor/github.com/rs/xid/hostid_windows.go delete mode 100644 mantle/vendor/github.com/rs/xid/id.go delete mode 100644 mantle/vendor/github.com/sirupsen/logrus/go.mod delete mode 100644 mantle/vendor/github.com/sirupsen/logrus/go.sum create mode 100644 mantle/vendor/github.com/spf13/cobra/.golangci.yml delete mode 100644 
mantle/vendor/github.com/spf13/cobra/.travis.yml create mode 100644 mantle/vendor/github.com/spf13/cobra/CONDUCT.md create mode 100644 mantle/vendor/github.com/spf13/cobra/CONTRIBUTING.md create mode 100644 mantle/vendor/github.com/spf13/cobra/MAINTAINERS create mode 100644 mantle/vendor/github.com/spf13/cobra/Makefile create mode 100644 mantle/vendor/github.com/spf13/cobra/active_help.go create mode 100644 mantle/vendor/github.com/spf13/cobra/active_help.md create mode 100644 mantle/vendor/github.com/spf13/cobra/bash_completionsV2.go create mode 100644 mantle/vendor/github.com/spf13/cobra/completions.go create mode 100644 mantle/vendor/github.com/spf13/cobra/fish_completions.go create mode 100644 mantle/vendor/github.com/spf13/cobra/fish_completions.md create mode 100644 mantle/vendor/github.com/spf13/cobra/flag_groups.go delete mode 100644 mantle/vendor/github.com/spf13/cobra/go.mod delete mode 100644 mantle/vendor/github.com/spf13/cobra/go.sum create mode 100644 mantle/vendor/github.com/spf13/cobra/projects_using_cobra.md create mode 100644 mantle/vendor/github.com/spf13/cobra/shell_completions.md create mode 100644 mantle/vendor/github.com/spf13/cobra/user_guide.md delete mode 100644 mantle/vendor/github.com/spf13/pflag/go.mod delete mode 100644 mantle/vendor/github.com/spf13/pflag/go.sum create mode 100644 mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go create mode 100644 mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/.gitignore delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/LICENSE delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/README.md delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/SECURITY.md delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/TODO.md delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/bits.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/crc.go delete mode 100644 
mantle/vendor/github.com/ulikunitz/xz/format.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/fox-check-none.xz delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/fox.xz delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/go.mod delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/internal/hash/doc.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/internal/hash/roller.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/bintree.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/bitops.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/breader.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/buffer.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/decoder.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/directcodec.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/distcodec.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/encoder.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/fox.lzma delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/hashtable.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/header.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/header2.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go delete mode 100644 
mantle/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/operation.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/prob.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/properties.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/reader.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/reader2.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/state.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/writer.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzma/writer2.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/lzmafilter.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/make-docs delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/none-check.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/reader.go delete mode 100644 mantle/vendor/github.com/ulikunitz/xz/writer.go delete mode 100644 mantle/vendor/github.com/xeipuuv/gojsonschema/go.mod delete mode 100644 mantle/vendor/github.com/xeipuuv/gojsonschema/go.sum delete mode 100644 mantle/vendor/go.opencensus.io/go.mod delete mode 100644 mantle/vendor/go.opencensus.io/go.sum delete mode 100644 mantle/vendor/golang.org/x/crypto/argon2/argon2.go delete mode 100644 mantle/vendor/golang.org/x/crypto/argon2/blake2b.go delete mode 100644 mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.go delete mode 100644 mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.s delete mode 100644 mantle/vendor/golang.org/x/crypto/argon2/blamka_generic.go delete mode 100644 mantle/vendor/golang.org/x/crypto/argon2/blamka_ref.go delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2b.go delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go 
delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/blake2x.go delete mode 100644 mantle/vendor/golang.org/x/crypto/blake2b/register.go delete mode 100644 mantle/vendor/golang.org/x/crypto/cast5/cast5.go delete mode 100644 mantle/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go delete mode 100644 mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/armor/armor.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/armor/encode.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/canonical_text.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/errors/errors.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/keys.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/compressed.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/config.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/literal.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/opaque.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/packet.go delete mode 100644 
mantle/vendor/golang.org/x/crypto/openpgp/packet/private_key.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/reader.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/signature.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/packet/userid.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/read.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go delete mode 100644 mantle/vendor/golang.org/x/crypto/openpgp/write.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/bmp-string.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/crypto.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/errors.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/mac.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/pbkdf.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/pkcs12.go delete mode 100644 mantle/vendor/golang.org/x/crypto/pkcs12/safebags.go delete mode 100644 mantle/vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 mantle/vendor/golang.org/x/net/AUTHORS create mode 100644 mantle/vendor/golang.org/x/net/CONTRIBUTORS delete mode 100644 mantle/vendor/golang.org/x/net/publicsuffix/list.go delete mode 100644 mantle/vendor/golang.org/x/net/publicsuffix/table.go delete mode 100644 
mantle/vendor/golang.org/x/oauth2/go.mod delete mode 100644 mantle/vendor/golang.org/x/oauth2/go.sum delete mode 100644 mantle/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go delete mode 100644 mantle/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s delete mode 100644 mantle/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go delete mode 100644 mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s delete mode 100644 mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s delete mode 100644 mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s delete mode 100644 mantle/vendor/golang.org/x/term/codereview.cfg delete mode 100644 mantle/vendor/golang.org/x/term/go.mod delete mode 100644 mantle/vendor/golang.org/x/term/go.sum create mode 100644 mantle/vendor/golang.org/x/term/term_solaris.go rename mantle/vendor/golang.org/x/term/{term_unix_other.go => term_unix_aix.go} (63%) rename mantle/vendor/golang.org/x/{net/http2/not_go118.go => term/term_unix_linux.go} (34%) rename mantle/vendor/golang.org/x/{net/http2/go118.go => term/term_unix_zos.go} (33%) delete mode 100644 mantle/vendor/google.golang.org/appengine/go.mod delete mode 100644 mantle/vendor/google.golang.org/appengine/go.sum delete mode 100644 mantle/vendor/google.golang.org/grpc/go.mod delete mode 100644 mantle/vendor/google.golang.org/grpc/go.sum delete mode 100644 mantle/vendor/gopkg.in/yaml.v2/go.mod delete mode 100644 mantle/vendor/gopkg.in/yaml.v3/go.mod create mode 100644 rpm-package/rpm-ostree-2022.16-3.oe2203sp2.aarch64.rpm create mode 100644 rpm-package/rpm-ostree-2022.16-3.oe2203sp2.x86_64.rpm delete mode 100644 rpm-package/rpm-ostree-2022.8-3.oe2203.aarch64.rpm delete mode 100644 rpm-package/rpm-ostree-2022.8-3.oe2203.x86_64.rpm create mode 100644 rpm-package/rpm-ostree-devel-2022.16-3.oe2203sp2.aarch64.rpm create mode 100644 rpm-package/rpm-ostree-devel-2022.16-3.oe2203sp2.x86_64.rpm delete mode 100644 rpm-package/rpm-ostree-devel-2022.8-3.oe2203.aarch64.rpm delete mode 100644 
rpm-package/rpm-ostree-devel-2022.8-3.oe2203.x86_64.rpm diff --git a/Dockerfile b/Dockerfile index 258f17e6..3b3dfc26 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ #FROM registry.fedoraproject.org/fedora:35 -FROM openeuler-22.03-lts:latest +FROM openeuler/openeuler:22.03-lts WORKDIR /root/containerbuild # Keep this Dockerfile idempotent for local development rebuild use cases. diff --git a/build.sh b/build.sh index f8e54055..93fd3116 100755 --- a/build.sh +++ b/build.sh @@ -46,7 +46,7 @@ install_rpms() { arch=$(uname -m) case $arch in - "x86_64") yum install virglrenderer;; + "x86_64") yum install -y virglrenderer;; *) echo "Only x86_64 need to install virglrenderer additionally" esac @@ -58,8 +58,8 @@ install_rpms() { yum install -y libsolv rpm-devel grubby initscripts iptables nftables python3-setuptools linux-firmware bubblewrap json-c ostree json-glib polkit-libs ostree-devel dnf-plugins-core container-selinux oci-runtime case $arch in - "x86_64") rpm -iUh libsolv-0.7.22-1.x86_64.rpm libsolv-devel-0.7.22-1.x86_64.rpm kernel-5.10.0-60.41.0.73.oe2203.x86_64.rpm kernel-headers-5.10.0-60.41.0.73.oe2203.x86_64.rpm buildah-1.26.1-1.x86_64.rpm butane-0.14.0-1.oe2203.x86_64.rpm dumb-init-1.2.5-4.oe2203.x86_64.rpm python3-semver-2.10.2-2.oe2203.noarch.rpm containers-common-1-1.oe2203.noarch.rpm netavark-1.0.2-1.x86_64.rpm rpm-ostree-2022.8-3.oe2203.x86_64.rpm rpm-ostree-devel-2022.8-3.oe2203.x86_64.rpm supermin-5.3.2-1.x86_64.rpm;; - "aarch64") rpm -iUh libsolv-0.7.22-1.aarch64.rpm libsolv-devel-0.7.22-1.aarch64.rpm kernel-5.10.0-118.0.0.64.oe2203.aarch64.rpm kernel-headers-5.10.0-118.0.0.64.oe2203.aarch64.rpm buildah-1.26.1-1.oe2203.aarch64.rpm butane-0.14.0-2.oe2203.aarch64.rpm dumb-init-1.2.5-1.oe2203.aarch64.rpm python3-semver-2.10.2-2.oe2203.noarch.rpm containers-common-1-1.oe2203.noarch.rpm netavark-1.0.2-1.oe2203.aarch64.rpm rpm-ostree-2022.8-3.oe2203.aarch64.rpm rpm-ostree-devel-2022.8-3.oe2203.aarch64.rpm supermin-5.3.2-1.oe2203.aarch64.rpm;; + 
"x86_64") rpm -iUh libsolv-0.7.22-1.x86_64.rpm libsolv-devel-0.7.22-1.x86_64.rpm kernel-5.10.0-60.41.0.73.oe2203.x86_64.rpm kernel-headers-5.10.0-60.41.0.73.oe2203.x86_64.rpm buildah-1.26.1-1.x86_64.rpm butane-0.14.0-1.oe2203.x86_64.rpm dumb-init-1.2.5-4.oe2203.x86_64.rpm python3-semver-2.10.2-2.oe2203.noarch.rpm containers-common-1-1.oe2203.noarch.rpm netavark-1.0.2-1.x86_64.rpm rpm-ostree-2022.16-3.oe2203sp2.x86_64.rpm rpm-ostree-devel-2022.16-3.oe2203sp2.x86_64.rpm supermin-5.3.2-1.x86_64.rpm;; + "aarch64") rpm -iUh libsolv-0.7.22-1.aarch64.rpm libsolv-devel-0.7.22-1.aarch64.rpm kernel-5.10.0-118.0.0.64.oe2203.aarch64.rpm kernel-headers-5.10.0-118.0.0.64.oe2203.aarch64.rpm buildah-1.26.1-1.oe2203.aarch64.rpm butane-0.14.0-2.oe2203.aarch64.rpm dumb-init-1.2.5-1.oe2203.aarch64.rpm python3-semver-2.10.2-2.oe2203.noarch.rpm containers-common-1-1.oe2203.noarch.rpm netavark-1.0.2-1.oe2203.aarch64.rpm rpm-ostree-2022.16-3.oe2203sp2.aarch64.rpm rpm-ostree-devel-2022.16-3.oe2203sp2.aarch64.rpm supermin-5.3.2-1.oe2203.aarch64.rpm;; *) fatal "Architecture ${arch} not supported" esac diff --git a/go.work b/go.work new file mode 100644 index 00000000..e74d72cf --- /dev/null +++ b/go.work @@ -0,0 +1,3 @@ +go 1.17 + +use ./mantle diff --git a/mantle/README.md b/mantle/README.md index d418b1b1..927757f1 100644 --- a/mantle/README.md +++ b/mantle/README.md @@ -158,8 +158,8 @@ azure, esx, ibmcloud and packet) within the latest SDK image. Ore mimics the und api for each cloud provider closely, so the interface for each cloud provider is different. See each providers `help` command for the available actions. -Note, when uploading to some cloud providers (e.g. gce) the image may need to be packaged -with a different --format (e.g. --format=gce) when running `image_to_vm.sh` +Note, when uploading to cloud platforms, be sure to use the image built for +that particular platform (with `cosa buildextend-...`). ### plume Plume is the Container Linux release utility. 
Releases are done in two stages, @@ -313,12 +313,9 @@ The JSON file exported to the variable `AZURE_AUTH_LOCATION` should be generated ``` ### gce -`gce` uses the `~/.boto` file. When the `gce` platform is first used, it will print -a link that can be used to log into your account with gce and get a verification code -you can paste in. This will populate the `.boto` file. - -See [Google Cloud Platform's Documentation](https://cloud.google.com/storage/docs/boto-gsutil) -for more information about the `.boto` file. +`gce` uses `~/.config/gce.json`, which contains a JSON-formatted service +account key. This can be downloaded from the Google Cloud console under +IAM > Service Accounts > [account] > Keys. ### openstack `openstack` uses `~/.config/openstack.json`. This can be configured manually: diff --git a/mantle/auth/azure.go b/mantle/auth/azure.go index 53ba62ae..c6023c52 100644 --- a/mantle/auth/azure.go +++ b/mantle/auth/azure.go @@ -15,7 +15,6 @@ package auth import ( - "bytes" "encoding/json" "fmt" "io/ioutil" @@ -44,66 +43,36 @@ type Options struct { SubscriptionName string SubscriptionID string - // Azure API endpoint. If unset, the Azure SDK default will be used. - ManagementURL string - ManagementCertificate []byte - // Azure Storage API endpoint suffix. If unset, the Azure SDK default will be used. 
StorageEndpointSuffix string } -type AzureEnvironment struct { - ActiveDirectoryEndpointURL string `json:"activeDirectoryEndpointUrl"` - ActiveDirectoryGraphAPIVersion string `json:"activeDirectoryGraphApiVersion"` - ActiveDirectoryGraphResourceID string `json:"activeDirectoryGraphResourceId"` - ActiveDirectoryResourceID string `json:"activeDirectoryResourceId"` - AzureDataLakeAnalyticsCatalogAndJobEndpointSuffix string `json:"azureDataLakeAnalyticsCatalogAndJobEndpointSuffix"` - AzureDataLakeStoreFileSystemEndpointSuffix string `json:"azureDataLakeStoreFileSystemEndpointSuffix"` - GalleryEndpointURL string `json:"galleryEndpointUrl"` - KeyVaultDNSSuffix string `json:"keyVaultDnsSuffix"` - ManagementEndpointURL string `json:"managementEndpointUrl"` - Name string `json:"name"` - PortalURL string `json:"portalUrl"` - PublishingProfileURL string `json:"publishingProfileUrl"` - ResourceManagerEndpointURL string `json:"resourceManagerEndpointUrl"` - SqlManagementEndpointURL string `json:"sqlManagementEndpointUrl"` - SqlServerHostnameSuffix string `json:"sqlServerHostnameSuffix"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` -} - -type AzureManagementCertificate struct { - Cert string `json:"cert"` - Key string `json:"key"` +type azureEnvironment struct { + Name string `json:"name"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` } -type AzureSubscription struct { - EnvironmentName string `json:"environmentName"` - ID string `json:"id"` - IsDefault bool `json:"isDefault"` - ManagementCertificate AzureManagementCertificate `json:"managementCertificate"` - ManagementEndpointURL string `json:"managementEndpointUrl"` - Name string `json:"name"` - RegisteredProviders []string `json:"registeredProviders"` - State string `json:"state"` +type azureSubscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + Name string `json:"name"` } // AzureProfile represents a parsed Azure Profile Configuration File. 
type AzureProfile struct { - Environments []AzureEnvironment `json:"environments"` - Subscriptions []AzureSubscription `json:"subscriptions"` + Environments []azureEnvironment `json:"environments"` + Subscriptions []azureSubscription `json:"subscriptions"` } // AsOptions converts all subscriptions into a slice of Options. // If there is an environment with a name matching the subscription, that environment's storage endpoint will be copied to the options. -func (ap *AzureProfile) AsOptions() []Options { +func (ap *AzureProfile) asOptions() []Options { var o []Options for _, sub := range ap.Subscriptions { newo := Options{ - SubscriptionName: sub.Name, - SubscriptionID: sub.ID, - ManagementURL: sub.ManagementEndpointURL, - ManagementCertificate: bytes.Join([][]byte{[]byte(sub.ManagementCertificate.Key), []byte(sub.ManagementCertificate.Cert)}, []byte("\n")), + SubscriptionName: sub.Name, + SubscriptionID: sub.ID, } // find the storage endpoint for the subscription @@ -124,7 +93,7 @@ func (ap *AzureProfile) AsOptions() []Options { // If the subscription name is "", the first subscription is returned. // If there are no subscriptions or the named subscription is not found, SubscriptionOptions returns nil. 
func (ap *AzureProfile) SubscriptionOptions(name string) *Options { - opts := ap.AsOptions() + opts := ap.asOptions() if len(opts) == 0 { return nil @@ -133,7 +102,7 @@ func (ap *AzureProfile) SubscriptionOptions(name string) *Options { if name == "" { return &opts[0] } else { - for _, o := range ap.AsOptions() { + for _, o := range opts { if o.SubscriptionName == name { return &o } @@ -156,7 +125,7 @@ func ReadAzureProfile(path string) (*AzureProfile, error) { path = filepath.Join(user.HomeDir, AzureProfilePath) } - contents, err := DecodeBOMFile(path) + contents, err := decodeBOMFile(path) if err != nil { return nil, err } @@ -173,7 +142,7 @@ func ReadAzureProfile(path string) (*AzureProfile, error) { return &ap, nil } -func DecodeBOMFile(path string) ([]byte, error) { +func decodeBOMFile(path string) ([]byte, error) { f, err := os.Open(path) if err != nil { return nil, err diff --git a/mantle/auth/google.go b/mantle/auth/google.go index a4b3396b..c36fccb2 100644 --- a/mantle/auth/google.go +++ b/mantle/auth/google.go @@ -17,11 +17,8 @@ package auth import ( "context" - "encoding/json" - "fmt" - "log" + "io/ioutil" "net/http" - "os" "os/user" "path/filepath" @@ -29,109 +26,11 @@ import ( "golang.org/x/oauth2/google" ) -// client registered under the coreos-gce-testing project as 'mantle' -var conf = oauth2.Config{ - ClientID: "1053977531921-s05q1c3kf23pdq86bmqv5qcga21c0ra3.apps.googleusercontent.com", - ClientSecret: "pgt0XUBTCfMwsqf2Q6cVdxTO", - Endpoint: oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://accounts.google.com/o/oauth2/token", - }, - RedirectURL: "urn:ietf:wg:oauth:2.0:oob", - Scopes: []string{"https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/compute"}, -} - -func writeCache(cachePath string, tok *oauth2.Token) error { - file, err := os.OpenFile(cachePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer file.Close() - - if err 
:= json.NewEncoder(file).Encode(tok); err != nil { - return err - } - return nil -} - -func readCache(cachePath string) (*oauth2.Token, error) { - file, err := os.Open(cachePath) - if err != nil { - return nil, err - } - defer file.Close() - - tok := &oauth2.Token{} - if err := json.NewDecoder(file).Decode(tok); err != nil { - return nil, err - } - - // make sure token is refreshable - if tok != nil && !tok.Valid() { - ts := conf.TokenSource(context.TODO(), tok) - tok, err = ts.Token() - if err != nil || !tok.Valid() { - fmt.Printf("Could not refresh cached token: %v\n", err) - return nil, nil - } - } - return tok, nil -} - -func getToken() (*oauth2.Token, error) { - userInfo, err := user.Current() - if err != nil { - return nil, err - } - - cachePath := filepath.Join(userInfo.HomeDir, ".mantle-cache-google.json") - tok, err := readCache(cachePath) - if err != nil { - log.Printf("Error reading google token cache file: %v", err) - } - if tok == nil { - url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline) - fmt.Printf("Visit the URL for the auth dialog: %v\n", url) - fmt.Print("Enter token: ") - - var code string - if _, err := fmt.Scan(&code); err != nil { - return nil, err - } - tok, err = conf.Exchange(context.TODO(), code) - if err != nil { - return nil, err - } - err = writeCache(cachePath, tok) - if err != nil { - log.Printf("Error writing google token cache file: %v", err) - } - } - return tok, nil -} - -// GoogleClient provides an http.Client authorized with an oauth2 token -// that is automatically cached and refreshed from a file named -// '.mantle-cache-google.json'. This uses interactive oauth2 -// authorization and requires a user follow to follow a web link and -// paste in an authorization token. 
-func GoogleClient() (*http.Client, error) { - tok, err := getToken() - if err != nil { - return nil, err - } - return conf.Client(context.TODO(), tok), nil -} +const GCEConfigPath = ".config/gce.json" -// GoogleTokenSource provides an outh2.TokenSource authorized in the -// same manner as GoogleClient. -func GoogleTokenSource() (oauth2.TokenSource, error) { - tok, err := getToken() - if err != nil { - return nil, err - } - return conf.TokenSource(context.TODO(), tok), nil +var scopes = []string{ + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/compute", } // GoogleServiceClient fetchs a token from Google Compute Engine's @@ -151,12 +50,27 @@ func GoogleServiceTokenSource() oauth2.TokenSource { return google.ComputeTokenSource("") } +func GoogleClientFromKeyFile(path string, scope ...string) (*http.Client, error) { + if path == "" { + user, err := user.Current() + if err != nil { + return nil, err + } + path = filepath.Join(user.HomeDir, GCEConfigPath) + } + b, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return GoogleClientFromJSONKey(b, scope...) +} + // GoogleClientFromJSONKey provides an http.Client authorized with an // oauth2 token retrieved using a Google Developers service account's // private JSON key file. func GoogleClientFromJSONKey(jsonKey []byte, scope ...string) (*http.Client, error) { if scope == nil { - scope = conf.Scopes + scope = scopes } jwtConf, err := google.JWTConfigFromJSON(jsonKey, scope...) if err != nil { @@ -171,7 +85,7 @@ func GoogleClientFromJSONKey(jsonKey []byte, scope ...string) (*http.Client, err // authorized in the same manner as GoogleClientFromJSONKey. func GoogleTokenSourceFromJSONKey(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { if scope == nil { - scope = conf.Scopes + scope = scopes } jwtConf, err := google.JWTConfigFromJSON(jsonKey, scope...) 
diff --git a/mantle/cmd/kola/devshell.go b/mantle/cmd/kola/devshell.go index f6f549ee..229db5c7 100644 --- a/mantle/cmd/kola/devshell.go +++ b/mantle/cmd/kola/devshell.go @@ -31,11 +31,12 @@ import ( "syscall" "time" + "github.com/coreos/mantle/network" "github.com/coreos/mantle/platform" "github.com/coreos/mantle/platform/conf" "github.com/coreos/mantle/util" "github.com/pkg/errors" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) const devshellHostname = "cosa-devsh" @@ -50,22 +51,26 @@ func stripControlCharacters(s string) string { }, s) } -func displayStatusMsg(status, msg string) { +func displayStatusMsg(status, msg string, termMaxWidth int) { s := strings.TrimSpace(msg) if s == "" { return } - max := 100 - if len(s) > max { - s = s[:max] + s = fmt.Sprintf("[%s] %s", status, stripControlCharacters(s)) + if termMaxWidth > 0 && len(s) > termMaxWidth { + s = s[:termMaxWidth] } - fmt.Printf("\033[2K\r[%s] %s", status, stripControlCharacters(s)) + fmt.Printf("\033[2K\r%s", s) } func runDevShellSSH(ctx context.Context, builder *platform.QemuBuilder, conf *conf.Conf, sshCommand string) error { - if !terminal.IsTerminal(0) { + if !term.IsTerminal(0) { return fmt.Errorf("stdin is not a tty") } + termMaxWidth, _, err := term.GetSize(0) + if err != nil { + termMaxWidth = 100 + } tmpd, err := ioutil.TempDir("", "kola-devshell") if err != nil { @@ -74,12 +79,18 @@ func runDevShellSSH(ctx context.Context, builder *platform.QemuBuilder, conf *co defer os.RemoveAll(tmpd) // Define SSH key - sshPubKeyBuf, sshKeyPath, err := util.CreateSSHAuthorizedKey(tmpd) + agent, err := network.NewSSHAgent(network.NewRetryDialer()) if err != nil { return err } - keys := []string{strings.TrimSpace(string(sshPubKeyBuf))} - conf.AddAuthorizedKeys("core", keys) + defer agent.Close() + + keys, err := agent.List() + if err != nil { + return err + } + + conf.CopyKeys(keys) builder.SetConfig(conf) // errChan communicates errors from go routines @@ -89,7 +100,9 @@ func runDevShellSSH(ctx 
context.Context, builder *platform.QemuBuilder, conf *co // stateChan reports in-instance state such as shutdown, reboot, etc. stateChan := make(chan guestState) - watchJournal(builder, conf, stateChan, errChan) + if err = watchJournal(builder, conf, stateChan, errChan); err != nil { + return err + } // SerialPipe is the pipe output from the serial console. serialPipe, err := builder.SerialPipe() @@ -157,7 +170,7 @@ func runDevShellSSH(ctx context.Context, builder *platform.QemuBuilder, conf *co defer func() { fmt.Printf("\n\n") }() // make the console pretty again // Start the SSH client - sc := newSshClient("core", ip, sshKeyPath, sshCommand) + sc := newSshClient(ip, agent.Socket, sshCommand) go sc.controlStartStop() ready := false @@ -169,13 +182,13 @@ func runDevShellSSH(ctx context.Context, builder *platform.QemuBuilder, conf *co // it directly to the instance. The intercept of ctrl-c will only happen when // ssh is not in the foreground. case <-sigintChan: - inst.Kill() + _ = inst.Kill() // handle console messages. If SSH is not ready, then display a // a status message on the console. 
case serialMsg := <-serialChan: if !ready { - displayStatusMsg(statusMsg, serialMsg) + displayStatusMsg(statusMsg, serialMsg, termMaxWidth) } lastMsg = serialMsg // monitor the err channel @@ -189,7 +202,7 @@ func runDevShellSSH(ctx context.Context, builder *platform.QemuBuilder, conf *co // monitor the instance state case <-qemuWaitChan: - displayStatusMsg("DONE", "QEMU instance terminated") + displayStatusMsg("DONE", "QEMU instance terminated", termMaxWidth) return nil // monitor the machine state events from console/serial logs @@ -207,7 +220,7 @@ func runDevShellSSH(ctx context.Context, builder *platform.QemuBuilder, conf *co statusMsg = "QEMU guest is shutting down" case guestStateHalted: statusMsg = "QEMU guest is halted" - inst.Kill() + _ = inst.Kill() case guestStateInReboot: statusMsg = "QEMU guest initiated reboot" case guestStateOpenSshStopped: @@ -220,17 +233,17 @@ func runDevShellSSH(ctx context.Context, builder *platform.QemuBuilder, conf *co statusMsg = "QEMU guest is booting" } } - displayStatusMsg(fmt.Sprintf("EVENT | %s", statusMsg), lastMsg) + displayStatusMsg(fmt.Sprintf("EVENT | %s", statusMsg), lastMsg, termMaxWidth) // monitor the SSH connection case err := <-sc.errChan: if err == nil { sc.controlChan <- sshNotReady - displayStatusMsg("SESSION", "Clean exit from SSH, terminating instance") + displayStatusMsg("SESSION", "Clean exit from SSH, terminating instance", termMaxWidth) return nil } else if sshCommand != "" { sc.controlChan <- sshNotReady - displayStatusMsg("SESSION", "SSH command exited, terminating instance") + displayStatusMsg("SESSION", "SSH command exited, terminating instance", termMaxWidth) return err } if ready { @@ -274,7 +287,7 @@ func checkWriteState(msg string, c chan<- guestState) { strings.Contains(msg, "Starting Halt...") { c <- guestStateHalted } - if strings.Contains(msg, "pam_unix(sshd:session): session closed for user core") { + if strings.Contains(msg, "pam_unix(sshd:session): session closed for user nest") { c <- 
guestStateSshDisconnected } if strings.Contains(msg, "The selected entry will be started automatically in 1s.") { @@ -439,10 +452,9 @@ const ( // sshClient represents a single SSH session. type sshClient struct { mu sync.Mutex - user string host string port string - privKey string + agent string cmd string controlChan chan sshControlMessage errChan chan error @@ -450,7 +462,7 @@ type sshClient struct { } // newSshClient creates a new sshClient. -func newSshClient(user, host, privKey, cmd string) *sshClient { +func newSshClient(host, agent, cmd string) *sshClient { parts := strings.Split(host, ":") host = parts[0] port := parts[1] @@ -460,10 +472,9 @@ func newSshClient(user, host, privKey, cmd string) *sshClient { return &sshClient{ mu: sync.Mutex{}, - user: user, host: host, port: port, - privKey: privKey, + agent: agent, controlChan: make(chan sshControlMessage), errChan: make(chan error), // this could be a []string, but ssh sends it over as a string anyway, so meh... @@ -491,17 +502,18 @@ func (sc *sshClient) start() { sshArgs := []string{ "ssh", "-t", - "-i", sc.privKey, + "-o", "User=nest", "-o", "StrictHostKeyChecking=no", "-o", "CheckHostIP=no", - "-o", "IdentityFile=/dev/null", - "-p", sc.port, - fmt.Sprintf("%s@%s", sc.user, sc.host), + "-o", "IdentityAgent=" + sc.agent, + "-o", "PreferredAuthentications=publickey", + "-p", sc.port, sc.host, } if sc.cmd != "" { sshArgs = append(sshArgs, "--", sc.cmd) } - fmt.Println("") // line break for prettier output + fmt.Printf("\033[2K\r") // clear serial console line + fmt.Printf("[SESSION] Starting SSH\r") // and stage a status msg which will be erased sshCmd := exec.Command(sshArgs[0], sshArgs[1:]...) 
sshCmd.Stdin = os.Stdin sshCmd.Stdout = os.Stdout @@ -520,7 +532,7 @@ func (sc *sshClient) start() { for scanner.Scan() { msg := scanner.Text() if strings.Contains(msg, "Connection to 127.0.0.1 closed") { - displayStatusMsg("SSH", "connection closed") + displayStatusMsg("SSH", "connection closed", 0) } } }() diff --git a/mantle/cmd/kola/kola.go b/mantle/cmd/kola/kola.go index 1d50bb7a..ec4ce253 100644 --- a/mantle/cmd/kola/kola.go +++ b/mantle/cmd/kola/kola.go @@ -18,17 +18,20 @@ import ( "encoding/json" "fmt" "io/ioutil" + "math/rand" "net/http" "os" "path/filepath" + "regexp" "sort" "text/tabwriter" + "time" "github.com/coreos/pkg/capnslog" "github.com/pkg/errors" "github.com/spf13/cobra" - "github.com/coreos/coreos-assembler-schema/cosa" + cosa "github.com/coreos/coreos-assembler/pkg/builds" "github.com/coreos/mantle/cli" "github.com/coreos/mantle/fcos" "github.com/coreos/mantle/harness/reporters" @@ -37,6 +40,7 @@ import ( "github.com/coreos/mantle/kola/register" "github.com/coreos/mantle/system" "github.com/coreos/mantle/util" + coreosarch "github.com/coreos/stream-metadata-go/arch" // register OS test suite _ "github.com/coreos/mantle/kola/registry" @@ -104,6 +108,14 @@ This can be useful for e.g. serving locally built OSTree repos to qemu. SilenceUsage: true, } + cmdNcpu = &cobra.Command{ + Use: "ncpu", + Short: "Report the number of available CPUs for parallelism", + RunE: runNcpu, + + SilenceUsage: true, + } + listJSON bool listPlatform string listDistro string @@ -112,9 +124,12 @@ This can be useful for e.g. serving locally built OSTree repos to qemu. 
qemuImageDir string qemuImageDirIsTemp bool - runExternals []string - runMultiply int - runRerunFlag bool + runExternals []string + runMultiply int + runRerunFlag bool + allowRerunSuccess bool + + nonexclusiveWrapperMatch = regexp.MustCompile(`^non-exclusive-test-bucket-[0-9]$`) ) func init() { @@ -122,6 +137,7 @@ func init() { cmdRun.Flags().StringArrayVarP(&runExternals, "exttest", "E", nil, "Externally defined tests (will be found in DIR/tests/kola)") cmdRun.Flags().IntVar(&runMultiply, "multiply", 0, "Run the provided tests N times (useful to find race conditions)") cmdRun.Flags().BoolVar(&runRerunFlag, "rerun", false, "re-run failed tests once") + cmdRun.Flags().BoolVar(&allowRerunSuccess, "allow-rerun-success", false, "Allow kola test run to be successful when tests pass during re-run") root.AddCommand(cmdList) cmdList.Flags().StringArrayVarP(&runExternals, "exttest", "E", nil, "Externally defined tests in directory") @@ -138,9 +154,13 @@ func init() { cmdRunUpgrade.Flags().BoolVar(&runRerunFlag, "rerun", false, "re-run failed tests once") root.AddCommand(cmdRerun) + + root.AddCommand(cmdNcpu) } func main() { + // initialize global state + rand.Seed(time.Now().UnixNano()) cli.Execute(root) } @@ -199,12 +219,22 @@ func runRerun(cmd *cobra.Command, args []string) error { return err } for _, test := range data.Tests { - name, isRerunnable := kola.GetRerunnableTestName(test.Name) - if test.Result == testresult.Fail && isRerunnable { - patterns = append(patterns, name) + if nonexclusiveWrapperMatch.MatchString(test.Name) { + // When the test hasn't started yet, we get the subtests + // of the test(nonExclusiveWrapper) for re-running + for _, subtest := range test.Subtests { + name, isRerunnable := kola.GetRerunnableTestName(subtest) + if test.Result == testresult.Fail && isRerunnable { + patterns = append(patterns, name) + } + } + } else { + name, isRerunnable := kola.GetRerunnableTestName(test.Name) + if test.Result == testresult.Fail && isRerunnable { + patterns 
= append(patterns, name) + } } } - return kolaRunPatterns(patterns, false) } @@ -219,7 +249,7 @@ func kolaRunPatterns(patterns []string, rerun bool) error { return err } - runErr := kola.RunTests(patterns, runMultiply, rerun, kolaPlatform, outputDir, !kola.Options.NoTestExitError) + runErr := kola.RunTests(patterns, runMultiply, rerun, allowRerunSuccess, kolaPlatform, outputDir, !kola.Options.NoTestExitError) // needs to be after RunTests() because harness empties the directory if err := writeProps(); err != nil { @@ -363,7 +393,8 @@ func runList(cmd *cobra.Command, args []string) error { test.Architectures, test.ExcludeArchitectures, test.Distros, - test.ExcludeDistros} + test.ExcludeDistros, + test.Tags} item.updateValues() testlist = append(testlist, item) } @@ -375,7 +406,7 @@ func runList(cmd *cobra.Command, args []string) error { if !listJSON { var w = tabwriter.NewWriter(os.Stdout, 0, 8, 0, '\t', 0) - fmt.Fprintln(w, "Test Name\tPlatforms\tArchitectures\tDistributions") + fmt.Fprintln(w, "Test Name\tPlatforms\tArchitectures\tDistributions\tTags") fmt.Fprintln(w, "\t") for _, item := range testlist { platformFound := (listPlatform == "all") @@ -422,6 +453,7 @@ type item struct { ExcludeArchitectures []string `json:"-"` Distros []string ExcludeDistros []string `json:"-"` + Tags []string } func (i *item) updateValues() { @@ -461,7 +493,7 @@ func (i *item) updateValues() { } func (i item) String() string { - return fmt.Sprintf("%v\t%v\t%v\t%v", i.Name, i.Platforms, i.Architectures, i.Distros) + return fmt.Sprintf("%v\t%v\t%v\t%v\t%v", i.Name, i.Platforms, i.Architectures, i.Distros, i.Tags) } func runHTTPServer(cmd *cobra.Command, args []string) error { @@ -527,15 +559,15 @@ func syncFindParentImageOptions() error { // Hardcoded for now based on https://github.com/openshift/installer/blob/release-4.6/data/data/rhcos.json tag := "rhcos-4.6" release := "46.82.202011260640-0" - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "s390x": - tag += 
"-" + system.RpmArch() + tag += "-" + coreosarch.CurrentRpmArch() release = "46.82.202011261339-0" case "ppc64le": - tag += "-" + system.RpmArch() + tag += "-" + coreosarch.CurrentRpmArch() release = "46.82.202011260639-0" } - parentBaseURL = fmt.Sprintf("https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/%s/%s/%s/", tag, release, system.RpmArch()) + parentBaseURL = fmt.Sprintf("https://rhcos.mirror.openshift.com/art/storage/releases/%s/%s/%s/", tag, release, coreosarch.CurrentRpmArch()) // sigh...someday we'll get the stuff signed by ART or maybe https://github.com/openshift/enhancements/pull/201 will just happen skipSignature = true default: @@ -558,6 +590,9 @@ func syncFindParentImageOptions() error { } qemuImageDirIsTemp = true } + if parentCosaBuild.BuildArtifacts.Qemu == nil { + return fmt.Errorf("No QEMU in parent meta.json") + } qcowURL := parentBaseURL + parentCosaBuild.BuildArtifacts.Qemu.Path qcowLocal := filepath.Join(qemuImageDir, parentCosaBuild.BuildArtifacts.Qemu.Path) decompressedQcowLocal, err := util.DownloadImageAndDecompress(qcowURL, qcowLocal, skipSignature) @@ -595,17 +630,29 @@ func getParentFcosBuildBase(stream string) (string, error) { if kola.CosaBuild.Meta.FedoraCoreOsParentVersion != "" { parentVersion = kola.CosaBuild.Meta.FedoraCoreOsParentVersion } else { - // ok, we're probably operating on a local dev build since the pipeline - // always injects the parent; just instead fetch the release index - // for that stream and get the last build id from there + // ok, we're probably operating on a local dev build or in the + // bump-lockfile job since the pipeline always injects the + // parent; just instead fetch the release index for that stream + // and get the last build id from there index, err := fcos.FetchAndParseCanonicalReleaseIndex(stream) if err != nil { return "", err } - - n := len(index.Releases) - if n == 0 { - // hmmm, no builds; likely a new stream. let's just fallback on testing-devel. 
+ // as we build for multi-architectures now, + // inorder to allow failures to be non-fatal and + // enable them to promote to non-prod streams + // when any specific previous release is unavailable for that arch. + // in that case, releases are searched in reverse order + // and the most recent release which has the secondary arch is considered. + for _, release := range index.Releases { + for _, commit := range release.Commits { + if commit.Architecture == (kola.Options.CosaBuildArch) { + parentVersion = release.Version + break + } + } + } + if parentVersion == "" { msg := fmt.Sprintf("no parent version in build metadata, and no build on stream %s", stream) if stream == "testing-devel" { return "", errors.New(msg) @@ -613,8 +660,6 @@ func getParentFcosBuildBase(stream string) (string, error) { plog.Infof("%s; falling back to testing-devel", msg) return getParentFcosBuildBase("testing-devel") } - - parentVersion = index.Releases[n-1].Version } return fcos.GetCosaBuildURL(stream, parentVersion, kola.Options.CosaBuildArch), nil @@ -644,3 +689,12 @@ func runRunUpgrade(cmd *cobra.Command, args []string) error { return runErr } + +func runNcpu(cmd *cobra.Command, args []string) error { + count, err := system.GetProcessors() + if err != nil { + return err + } + fmt.Println(count) + return nil +} diff --git a/mantle/cmd/kola/options.go b/mantle/cmd/kola/options.go index 2729a4d5..4434c778 100644 --- a/mantle/cmd/kola/options.go +++ b/mantle/cmd/kola/options.go @@ -18,8 +18,10 @@ import ( "fmt" "os" "path/filepath" + "strconv" "strings" + coreosarch "github.com/coreos/stream-metadata-go/arch" "github.com/coreos/stream-metadata-go/stream" "github.com/pkg/errors" @@ -28,16 +30,17 @@ import ( "github.com/coreos/mantle/kola" "github.com/coreos/mantle/platform" "github.com/coreos/mantle/rhcos" - "github.com/coreos/mantle/sdk" "github.com/coreos/mantle/system" + "github.com/coreos/mantle/util" ) var ( outputDir string kolaPlatform string + kolaParallelArg string 
kolaArchitectures = []string{"amd64"} kolaPlatforms = []string{"aws", "azure", "do", "esx", "gce", "openstack", "packet", "qemu", "qemu-unpriv", "qemu-iso"} - kolaDistros = []string{"fcos", "rhcos", "nestos"} + kolaDistros = []string{"fcos", "rhcos", "scos", "nestos"} ) func init() { @@ -50,19 +53,20 @@ func init() { sv(&outputDir, "output-dir", "", "Temporary output directory for test data and logs") root.PersistentFlags().StringVarP(&kolaPlatform, "platform", "p", "", "VM platform: "+strings.Join(kolaPlatforms, ", ")) root.PersistentFlags().StringVarP(&kola.Options.Distribution, "distro", "b", "", "Distribution: "+strings.Join(kolaDistros, ", ")) - root.PersistentFlags().IntVarP(&kola.TestParallelism, "parallel", "j", 1, "number of tests to run in parallel") + root.PersistentFlags().StringVarP(&kolaParallelArg, "parallel", "j", "1", "number of tests to run in parallel, or \"auto\" to match CPU count") sv(&kola.TAPFile, "tapfile", "", "file to write TAP results to") root.PersistentFlags().BoolVarP(&kola.Options.NoTestExitError, "no-test-exit-error", "T", false, "Don't exit with non-zero if tests fail") sv(&kola.Options.BaseName, "basename", "kola", "Cluster name prefix") ss("debug-systemd-unit", []string{}, "full-unit-name.service to enable SYSTEMD_LOG_LEVEL=debug on. Can be specified multiple times.") ssv(&kola.DenylistedTests, "denylist-test", []string{}, "Test pattern to add to denylist. Can be specified multiple times.") bv(&kola.NoNet, "no-net", false, "Don't run tests that require an Internet connection") + bv(&kola.ForceRunPlatformIndependent, "run-platform-independent", false, "Run tests that claim platform independence") ssv(&kola.Tags, "tag", []string{}, "Test tag to run. Can be specified multiple times.") bv(&kola.Options.SSHOnTestFailure, "ssh-on-test-failure", false, "SSH into a machine when tests fail") sv(&kola.Options.Stream, "stream", "", "CoreOS stream ID (e.g. 
for Fedora CoreOS: stable, testing, next)") sv(&kola.Options.CosaWorkdir, "workdir", "", "coreos-assembler working directory") sv(&kola.Options.CosaBuildId, "build", "", "coreos-assembler build ID") - sv(&kola.Options.CosaBuildArch, "arch", system.RpmArch(), "The target architecture of the build") + sv(&kola.Options.CosaBuildArch, "arch", coreosarch.CurrentRpmArch(), "The target architecture of the build") // rhcos-specific options sv(&kola.Options.OSContainer, "oscontainer", "", "oscontainer image pullspec for pivot (RHCOS only)") @@ -108,13 +112,14 @@ func init() { // gce-specific options sv(&kola.GCEOptions.Image, "gce-image", "", "GCE image, full api endpoints names are accepted if resource is in a different project") - sv(&kola.GCEOptions.Project, "gce-project", "coreos-gce-testing", "GCE project name") + sv(&kola.GCEOptions.Project, "gce-project", "fedora-coreos-devel", "GCE project name") sv(&kola.GCEOptions.Zone, "gce-zone", "us-central1-a", "GCE zone name") sv(&kola.GCEOptions.MachineType, "gce-machinetype", "n1-standard-1", "GCE machine type") sv(&kola.GCEOptions.DiskType, "gce-disktype", "pd-ssd", "GCE disk type") sv(&kola.GCEOptions.Network, "gce-network", "default", "GCE network") + sv(&kola.GCEOptions.ServiceAcct, "gce-service-account", "", "GCE service account to attach to instance (default project default)") bv(&kola.GCEOptions.ServiceAuth, "gce-service-auth", false, "for non-interactive auth when running within GCE") - sv(&kola.GCEOptions.JSONKeyFile, "gce-json-key", "", "use a service account's JSON key for authentication") + sv(&kola.GCEOptions.JSONKeyFile, "gce-json-key", "", "use a service account's JSON key for authentication (default \"~/"+auth.GCEConfigPath+"\")") // openstack-specific options sv(&kola.OpenStackOptions.ConfigPath, "openstack-config-file", "", "Path to a clouds.yaml formatted OpenStack config file. 
The underlying library defaults to ./clouds.yaml") @@ -180,6 +185,21 @@ func syncOptionsImpl(useCosa bool) error { kolaPlatform = "qemu-iso" } + // test parallelism + if kolaParallelArg == "auto" { + ncpu, err := system.GetProcessors() + if err != nil { + return fmt.Errorf("detecting CPU count: %w", err) + } + kola.TestParallelism = int(ncpu) + } else { + parallel, err := strconv.ParseInt(kolaParallelArg, 10, 32) + if err != nil { + return fmt.Errorf("parsing --parallel argument: %w", err) + } + kola.TestParallelism = int(parallel) + } + // native 4k requires a UEFI bootloader if kola.QEMUOptions.Native4k && kola.QEMUOptions.Firmware == "bios" { return fmt.Errorf("native 4k requires uefi firmware") @@ -225,7 +245,7 @@ func syncOptionsImpl(useCosa bool) error { kola.Options.CosaWorkdir = "." } - localbuild, err := sdk.GetLocalBuild(kola.Options.CosaWorkdir, + localbuild, err := util.GetLocalBuild(kola.Options.CosaWorkdir, kola.Options.CosaBuildId, kola.Options.CosaBuildArch) if err != nil { @@ -243,7 +263,7 @@ func syncOptionsImpl(useCosa bool) error { // specified neither --build nor --workdir; only opportunistically // try to use the PWD as the workdir, but don't error out if it's // not - if isroot, err := sdk.IsCosaRoot("."); err != nil { + if isroot, err := util.IsCosaRoot("."); err != nil { return err } else if isroot { kola.Options.CosaWorkdir = "." 
@@ -251,7 +271,7 @@ func syncOptionsImpl(useCosa bool) error { } if kola.Options.CosaWorkdir != "" && kola.Options.CosaWorkdir != "none" { - localbuild, err := sdk.GetLatestLocalBuild(kola.Options.CosaWorkdir, + localbuild, err := util.GetLatestLocalBuild(kola.Options.CosaWorkdir, kola.Options.CosaBuildArch) if err != nil { if !os.IsNotExist(errors.Cause(err)) { @@ -263,7 +283,7 @@ func syncOptionsImpl(useCosa bool) error { foundCosa = true } } else if kola.QEMUOptions.DiskImage == "" { - localbuild, err := sdk.GetLocalFastBuildQemu() + localbuild, err := util.GetLocalFastBuildQemu() if err != nil { return err } @@ -288,12 +308,11 @@ func syncOptionsImpl(useCosa bool) error { }) } - if kola.Options.OSContainer != "" && kola.Options.Distribution != "rhcos" { - return fmt.Errorf("oscontainer is only supported on rhcos") - } - if kola.Options.Distribution == "" { kola.Options.Distribution = kolaDistros[0] + } else if kola.Options.Distribution == "scos" { + // Consider SCOS the same as RHCOS for now + kola.Options.Distribution = "rhcos" } else if err := validateOption("distro", kola.Options.Distribution, kolaDistros); err != nil { return err } @@ -341,7 +360,7 @@ func syncCosaOptions() error { } if kola.Options.Distribution == "" { - distro, err := sdk.TargetDistro(kola.CosaBuild.Meta) + distro, err := util.TargetDistro(kola.CosaBuild.Meta) if err != nil { return err } @@ -375,7 +394,6 @@ func syncStreamOptions() error { if err != nil { return errors.Wrapf(err, "failed to fetch stream") } - break default: return fmt.Errorf("Unhandled stream for distribution %s", kola.Options.Distribution) } diff --git a/mantle/cmd/kola/qemuexec.go b/mantle/cmd/kola/qemuexec.go index 3afa046e..1f20770d 100644 --- a/mantle/cmd/kola/qemuexec.go +++ b/mantle/cmd/kola/qemuexec.go @@ -174,7 +174,7 @@ func runQemuExec(cmd *cobra.Command, args []string) error { cpuCountHost = true usernet = true // Can't use 9p on RHEL8, need https://virtio-fs.gitlab.io/ instead in the future - if 
kola.Options.CosaWorkdir != "" && !strings.HasPrefix(filepath.Base(kola.QEMUOptions.DiskImage), "rhcos") { + if kola.Options.CosaWorkdir != "" && !strings.HasPrefix(filepath.Base(kola.QEMUOptions.DiskImage), "rhcos") && !strings.HasPrefix(filepath.Base(kola.QEMUOptions.DiskImage), "scos") && kola.Options.Distribution != "rhcos" && kola.Options.Distribution != "scos" { // Conservatively bind readonly to avoid anything in the guest (stray tests, whatever) // from destroying stuff bindro = append(bindro, fmt.Sprintf("%s,/var/mnt/workdir", kola.Options.CosaWorkdir)) @@ -307,7 +307,9 @@ func runQemuExec(cmd *cobra.Command, args []string) error { } builder.Memory = int(parsedMem) } - builder.AddDisksFromSpecs(addDisks) + if err = builder.AddDisksFromSpecs(addDisks); err != nil { + return err + } if cpuCountHost { builder.Processors = -1 } diff --git a/mantle/cmd/kola/switchkernel.go b/mantle/cmd/kola/switchkernel.go index eabe3b24..c4f06651 100644 --- a/mantle/cmd/kola/switchkernel.go +++ b/mantle/cmd/kola/switchkernel.go @@ -43,7 +43,7 @@ var ( ) var ( - homeDir = `/var/home/core` + homeDir = `/var/home/nest` switchKernelScript = `#!/usr/bin/env bash # This script is a shameless translation of: https://github.com/openshift/machine-config-operator/blob/f363c7be6d2d506d900e196fa2e2d05ca08b93b6/pkg/daemon/update.go#L651 # Usage: @@ -60,7 +60,7 @@ var ( if [[ $FROM_KERNEL == "default" && $TO_KERNEL == "rt-kernel" ]]; then # Switch from default to RT Kernel - # https://github.com/openshift/machine-config-operator/blob/master/pkg/daemon/update.go#L711 + # https://github.com/openshift/machine-config-operator/blob/e246be62e7839a086bc4494203472349c406dcae/pkg/daemon/update.go#L711 RT_KERNEL_REPO=$3 if [[ -z $(ls ${RT_KERNEL_REPO}) ]]; then echo "No kernel-rt package available in the repo: ${RT_KERNEL_REPO}" @@ -111,7 +111,7 @@ func runSwitchKernel(cmd *cobra.Command, args []string) error { "files": [ { "filesystem": "root", - "path": "/var/home/core/switch-kernel.sh", + "path": 
"/var/home/nest/switch-kernel.sh", "contents": { "source": "data:text/plain;base64,%s" }, diff --git a/mantle/cmd/kola/testiso.go b/mantle/cmd/kola/testiso.go index 13b124c4..72ce91b7 100644 --- a/mantle/cmd/kola/testiso.go +++ b/mantle/cmd/kola/testiso.go @@ -31,8 +31,8 @@ import ( "time" "github.com/coreos/mantle/platform/conf" - "github.com/coreos/mantle/system" "github.com/coreos/mantle/util" + coreosarch "github.com/coreos/stream-metadata-go/arch" "github.com/pkg/errors" "github.com/spf13/cobra" @@ -142,7 +142,20 @@ ExecStart=/bin/sh -c '/usr/bin/echo %s >/dev/virtio-ports/testisocompletion && s RequiredBy=multi-user.target `, signalCompleteString) -var checkNoIgnition = fmt.Sprintf(`[Unit] +var signalEmergencyString = "coreos-installer-test-entered-emergency-target" +var signalFailureUnit = fmt.Sprintf(`[Unit] +Description=TestISO Signal Failure +Requires=dev-virtio\\x2dports-testisocompletion.device +DefaultDependencies=false +[Service] +Type=oneshot +RemainAfterExit=yes +ExecStart=/bin/sh -c '/usr/bin/echo %s >/dev/virtio-ports/testisocompletion && systemctl poweroff' +[Install] +RequiredBy=emergency.target +`, signalEmergencyString) + +var checkNoIgnition = `[Unit] Description=TestISO Verify No Ignition Config OnFailure=emergency.target OnFailureJobMode=isolate @@ -154,9 +167,9 @@ Type=oneshot RemainAfterExit=yes ExecStart=/bin/sh -c '[ ! 
-e /boot/ignition ]' [Install] -RequiredBy=multi-user.target`) +RequiredBy=multi-user.target` -var multipathedRoot = fmt.Sprintf(`[Unit] +var multipathedRoot = `[Unit] Description=TestISO Verify Multipathed Root OnFailure=emergency.target OnFailureJobMode=isolate @@ -166,9 +179,9 @@ Type=oneshot RemainAfterExit=yes ExecStart=/bin/bash -c '[[ $(findmnt -nvro SOURCE /sysroot) == /dev/mapper/mpatha4 ]]' [Install] -RequiredBy=multi-user.target`) +RequiredBy=multi-user.target` -var verifyNoEFIBootEntry = fmt.Sprintf(`[Unit] +var verifyNoEFIBootEntry = `[Unit] Description=TestISO Verify No EFI Boot Entry OnFailure=emergency.target OnFailureJobMode=isolate @@ -182,13 +195,17 @@ ExecStart=/bin/sh -c '! efibootmgr -v | grep -E "(HD|CDROM)\("' # for install scenarios RequiredBy=coreos-installer.target # for iso-as-disk -RequiredBy=multi-user.target`) +RequiredBy=multi-user.target` var nmConnectionId = "CoreOS DHCP" var nmConnectionFile = "coreos-dhcp.nmconnection" var nmConnection = fmt.Sprintf(`[connection] id=%s type=ethernet +# add wait-device-timeout here so we make sure NetworkManager-wait-online.service will +# wait for a device to be present before exiting. 
See +# https://github.com/coreos/fedora-coreos-tracker/issues/1275#issuecomment-1231605438 +wait-device-timeout=20000 [ipv4] method=auto @@ -293,7 +310,7 @@ func newQemuBuilderWithDisk(outdir string) (*platform.QemuBuilder, *conf.Conf, e } //TBD: see if we can remove this and just use AddDisk and inject bootindex during startup - if system.RpmArch() == "s390x" || system.RpmArch() == "aarch64" { + if coreosarch.CurrentRpmArch() == "s390x" || coreosarch.CurrentRpmArch() == "aarch64" { // s390x and aarch64 need to use bootindex as they don't support boot once if err := builder.AddDisk(&disk); err != nil { return nil, nil, err @@ -323,7 +340,7 @@ func runTestIso(cmd *cobra.Command, args []string) error { } // s390x: iso-install does not work because s390x uses an El Torito image - if system.RpmArch() == "s390x" { + if coreosarch.CurrentRpmArch() == "s390x" { fmt.Println("Skipping iso-install on s390x") noiso = true } @@ -388,11 +405,11 @@ func runTestIso(cmd *cobra.Command, args []string) error { ranTest = true instPxe := baseInst // Pretend this is Rust and I wrote .copy() - if err := testPXE(ctx, instPxe, filepath.Join(outputDir, scenarioPXEInstall), false); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioPXEInstall) - + duration, err := testPXE(ctx, instPxe, filepath.Join(outputDir, scenarioPXEInstall), false) + printResult(scenarioPXEInstall, duration, err) + if err != nil { + return err } - printSuccess(scenarioPXEInstall) } if _, ok := targetScenarios[scenarioPXEOfflineInstall]; ok { if kola.CosaBuild.Meta.BuildArtifacts.LiveKernel == nil { @@ -402,11 +419,11 @@ func runTestIso(cmd *cobra.Command, args []string) error { ranTest = true instPxe := baseInst // Pretend this is Rust and I wrote .copy() - if err := testPXE(ctx, instPxe, filepath.Join(outputDir, scenarioPXEOfflineInstall), true); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioPXEOfflineInstall) - + duration, err := testPXE(ctx, instPxe, filepath.Join(outputDir, 
scenarioPXEOfflineInstall), true) + printResult(scenarioPXEOfflineInstall, duration, err) + if err != nil { + return err } - printSuccess(scenarioPXEOfflineInstall) } if _, ok := targetScenarios[scenarioISOInstall]; ok { if kola.CosaBuild.Meta.BuildArtifacts.LiveIso == nil { @@ -414,10 +431,11 @@ func runTestIso(cmd *cobra.Command, args []string) error { } ranTest = true instIso := baseInst // Pretend this is Rust and I wrote .copy() - if err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioISOInstall), false, false); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioISOInstall) + duration, err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioISOInstall), false, false) + printResult(scenarioISOInstall, duration, err) + if err != nil { + return err } - printSuccess(scenarioISOInstall) } if _, ok := targetScenarios[scenarioISOOfflineInstall]; ok { if kola.CosaBuild.Meta.BuildArtifacts.LiveIso == nil { @@ -425,35 +443,38 @@ func runTestIso(cmd *cobra.Command, args []string) error { } ranTest = true instIso := baseInst // Pretend this is Rust and I wrote .copy() - if err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioISOOfflineInstall), true, false); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioISOOfflineInstall) + duration, err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioISOOfflineInstall), true, false) + printResult(scenarioISOOfflineInstall, duration, err) + if err != nil { + return err } - printSuccess(scenarioISOOfflineInstall) } if _, ok := targetScenarios[scenarioISOLiveLogin]; ok { if kola.CosaBuild.Meta.BuildArtifacts.LiveIso == nil { return fmt.Errorf("build %s has no live ISO", kola.CosaBuild.Meta.Name) } ranTest = true - if err := testLiveLogin(ctx, filepath.Join(outputDir, scenarioISOLiveLogin)); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioISOLiveLogin) + duration, err := testLiveLogin(ctx, filepath.Join(outputDir, scenarioISOLiveLogin)) + 
printResult(scenarioISOLiveLogin, duration, err) + if err != nil { + return err } - printSuccess(scenarioISOLiveLogin) } if _, ok := targetScenarios[scenarioISOAsDisk]; ok { if kola.CosaBuild.Meta.BuildArtifacts.LiveIso == nil { return fmt.Errorf("build %s has no live ISO", kola.CosaBuild.Meta.Name) } - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "x86_64": ranTest = true - if err := testAsDisk(ctx, filepath.Join(outputDir, scenarioISOAsDisk)); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioISOAsDisk) + duration, err := testAsDisk(ctx, filepath.Join(outputDir, scenarioISOAsDisk)) + printResult(scenarioISOAsDisk, duration, err) + if err != nil { + return err } - printSuccess(scenarioISOAsDisk) default: // no hybrid partition table to boot from - fmt.Printf("%s unsupported on %s; skipping\n", scenarioISOAsDisk, system.RpmArch()) + fmt.Printf("%s unsupported on %s; skipping\n", scenarioISOAsDisk, coreosarch.CurrentRpmArch()) } } if _, ok := targetScenarios[scenarioMinISOInstall]; ok { @@ -462,10 +483,11 @@ func runTestIso(cmd *cobra.Command, args []string) error { } ranTest = true instIso := baseInst // Pretend this is Rust and I wrote .copy() - if err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioMinISOInstall), false, true); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioMinISOInstall) + duration, err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioMinISOInstall), false, true) + printResult(scenarioMinISOInstall, duration, err) + if err != nil { + return err } - printSuccess(scenarioMinISOInstall) } if _, ok := targetScenarios[scenarioMinISOInstallNm]; ok { if kola.CosaBuild.Meta.BuildArtifacts.LiveIso == nil { @@ -474,10 +496,11 @@ func runTestIso(cmd *cobra.Command, args []string) error { ranTest = true instIso := baseInst // Pretend this is Rust and I wrote .copy() addNmKeyfile = true - if err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioMinISOInstallNm), 
false, true); err != nil { - return errors.Wrapf(err, "scenario %s", scenarioMinISOInstallNm) + duration, err := testLiveIso(ctx, instIso, filepath.Join(outputDir, scenarioMinISOInstallNm), false, true) + printResult(scenarioMinISOInstallNm, duration, err) + if err != nil { + return err } - printSuccess(scenarioMinISOInstallNm) } if !ranTest { @@ -487,7 +510,8 @@ func runTestIso(cmd *cobra.Command, args []string) error { return nil } -func awaitCompletion(ctx context.Context, inst *platform.QemuInstance, outdir string, qchan *os.File, booterrchan chan error, expected []string) error { +func awaitCompletion(ctx context.Context, inst *platform.QemuInstance, outdir string, qchan *os.File, booterrchan chan error, expected []string) (time.Duration, error) { + start := time.Now() errchan := make(chan error) go func() { time.Sleep(installTimeout) @@ -513,8 +537,12 @@ func awaitCompletion(ctx context.Context, inst *platform.QemuInstance, outdir st } go func() { err := inst.Wait() + // only one Wait() gets process data, so also manually check for signal + if err == nil && inst.Signaled() { + err = errors.New("process killed") + } if err != nil { - errchan <- err + errchan <- errors.Wrapf(err, "QEMU unexpectedly exited while awaiting completion") } time.Sleep(1 * time.Minute) errchan <- fmt.Errorf("QEMU exited; timed out waiting for completion") @@ -525,6 +553,10 @@ func awaitCompletion(ctx context.Context, inst *platform.QemuInstance, outdir st l, err := r.ReadString('\n') if err != nil { if err == io.EOF { + // this may be from QEMU getting killed or exiting; wait a bit + // to give a chance for .Wait() above to feed the channel with a + // better error + time.Sleep(1 * time.Second) errchan <- fmt.Errorf("Got EOF from completion channel, %s expected", exp) } else { errchan <- errors.Wrapf(err, "reading from completion channel") @@ -548,56 +580,67 @@ func awaitCompletion(ctx context.Context, inst *platform.QemuInstance, outdir st } } }() - return <-errchan + err := 
<-errchan + return time.Since(start), err } -func printSuccess(mode string) { - metaltype := "metal" +func printResult(mode string, duration time.Duration, err error) bool { + result := "PASS" + if err != nil { + result = "FAIL" + } + variant := []string{kola.QEMUOptions.Firmware} if kola.QEMUOptions.Native4k { - metaltype = "metal4k" + variant = append(variant, "metal4k") + } else { + variant = append(variant, "metal") } - onMultipath := "" if kola.QEMUOptions.MultiPathDisk { - onMultipath = " on multipath" + variant = append(variant, "multipath") } - withNmKeyfile := "" if addNmKeyfile { - withNmKeyfile = " with NM keyfile" + variant = append(variant, "nm-keyfile") } - fmt.Printf("Successfully tested scenario %s for %s on %s (%s%s%s)\n", mode, kola.CosaBuild.Meta.OstreeVersion, kola.QEMUOptions.Firmware, metaltype, onMultipath, withNmKeyfile) + fmt.Printf("%s: %s (%s) (%s)\n", result, mode, strings.Join(variant, " + "), duration.Round(time.Millisecond).String()) + if err != nil { + fmt.Printf(" %s\n", err) + return true + } + return false } -func testPXE(ctx context.Context, inst platform.Install, outdir string, offline bool) error { +func testPXE(ctx context.Context, inst platform.Install, outdir string, offline bool) (time.Duration, error) { if addNmKeyfile { - return errors.New("--add-nm-keyfile not yet supported for PXE") + return 0, errors.New("--add-nm-keyfile not yet supported for PXE") } tmpd, err := ioutil.TempDir("", "kola-testiso") if err != nil { - return err + return 0, err } defer os.RemoveAll(tmpd) sshPubKeyBuf, _, err := util.CreateSSHAuthorizedKey(tmpd) if err != nil { - return err + return 0, err } builder, virtioJournalConfig, err := newQemuBuilderWithDisk(outdir) if err != nil { - return err + return 0, err } inst.Builder = builder completionChannel, err := inst.Builder.VirtioChannelRead("testisocompletion") if err != nil { - return err + return 0, err } var keys []string keys = append(keys, strings.TrimSpace(string(sshPubKeyBuf))) - 
virtioJournalConfig.AddAuthorizedKeys("core", keys) + virtioJournalConfig.AddAuthorizedKeys("nest", keys) liveConfig := *virtioJournalConfig liveConfig.AddSystemdUnit("live-signal-ok.service", liveSignalOKUnit, conf.Enable) + liveConfig.AddSystemdUnit("coreos-test-entered-emergency-target.service", signalFailureUnit, conf.Enable) if offline { contents := fmt.Sprintf(downloadCheck, kola.CosaBuild.Meta.BuildID, kola.CosaBuild.Meta.OstreeCommit) @@ -606,11 +649,12 @@ func testPXE(ctx context.Context, inst platform.Install, outdir string, offline targetConfig := *virtioJournalConfig targetConfig.AddSystemdUnit("coreos-test-installer.service", signalCompletionUnit, conf.Enable) + targetConfig.AddSystemdUnit("coreos-test-entered-emergency-target.service", signalFailureUnit, conf.Enable) targetConfig.AddSystemdUnit("coreos-test-installer-no-ignition.service", checkNoIgnition, conf.Enable) mach, err := inst.PXE(pxeKernelArgs, liveConfig, targetConfig, offline) if err != nil { - return errors.Wrapf(err, "running PXE") + return 0, errors.Wrapf(err, "running PXE") } defer func() { if err := mach.Destroy(); err != nil { @@ -621,38 +665,40 @@ func testPXE(ctx context.Context, inst platform.Install, outdir string, offline return awaitCompletion(ctx, mach.QemuInst, outdir, completionChannel, mach.BootStartedErrorChannel, []string{liveOKSignal, signalCompleteString}) } -func testLiveIso(ctx context.Context, inst platform.Install, outdir string, offline, minimal bool) error { +func testLiveIso(ctx context.Context, inst platform.Install, outdir string, offline, minimal bool) (time.Duration, error) { tmpd, err := ioutil.TempDir("", "kola-testiso") if err != nil { - return err + return 0, err } defer os.RemoveAll(tmpd) sshPubKeyBuf, _, err := util.CreateSSHAuthorizedKey(tmpd) if err != nil { - return err + return 0, err } builder, virtioJournalConfig, err := newQemuBuilderWithDisk(outdir) if err != nil { - return err + return 0, err } inst.Builder = builder completionChannel, err := 
inst.Builder.VirtioChannelRead("testisocompletion") if err != nil { - return err + return 0, err } var keys []string keys = append(keys, strings.TrimSpace(string(sshPubKeyBuf))) - virtioJournalConfig.AddAuthorizedKeys("core", keys) + virtioJournalConfig.AddAuthorizedKeys("nest", keys) liveConfig := *virtioJournalConfig liveConfig.AddSystemdUnit("live-signal-ok.service", liveSignalOKUnit, conf.Enable) liveConfig.AddSystemdUnit("verify-no-efi-boot-entry.service", verifyNoEFIBootEntry, conf.Enable) + liveConfig.AddSystemdUnit("coreos-test-entered-emergency-target.service", signalFailureUnit, conf.Enable) targetConfig := *virtioJournalConfig targetConfig.AddSystemdUnit("coreos-test-installer.service", signalCompletionUnit, conf.Enable) + targetConfig.AddSystemdUnit("coreos-test-entered-emergency-target.service", signalFailureUnit, conf.Enable) targetConfig.AddSystemdUnit("coreos-test-installer-no-ignition.service", checkNoIgnition, conf.Enable) if inst.MultiPathDisk { targetConfig.AddSystemdUnit("coreos-test-installer-multipathed.service", multipathedRoot, conf.Enable) @@ -666,7 +712,7 @@ func testLiveIso(ctx context.Context, inst platform.Install, outdir string, offl mach, err := inst.InstallViaISOEmbed(nil, liveConfig, targetConfig, outdir, offline, minimal) if err != nil { - return errors.Wrapf(err, "running iso install") + return 0, errors.Wrapf(err, "running iso install") } defer func() { if err := mach.Destroy(); err != nil { @@ -677,22 +723,22 @@ func testLiveIso(ctx context.Context, inst platform.Install, outdir string, offl return awaitCompletion(ctx, mach.QemuInst, outdir, completionChannel, mach.BootStartedErrorChannel, []string{liveOKSignal, signalCompleteString}) } -func testLiveLogin(ctx context.Context, outdir string) error { +func testLiveLogin(ctx context.Context, outdir string) (time.Duration, error) { builddir := kola.CosaBuild.Dir isopath := filepath.Join(builddir, kola.CosaBuild.Meta.BuildArtifacts.LiveIso.Path) builder, err := 
newBaseQemuBuilder(outdir) if err != nil { - return nil + return 0, err } defer builder.Close() // Drop the bootindex bit (applicable to all arches except s390x and ppc64le); we want it to be the default if err := builder.AddIso(isopath, "", false); err != nil { - return err + return 0, err } - completionChannel, err := builder.VirtioChannelRead("nestos.liveiso-success") + completionChannel, err := builder.VirtioChannelRead("coreos.liveiso-success") if err != nil { - return err + return 0, err } // No network device to test https://github.com/coreos/fedora-coreos-config/pull/326 @@ -700,29 +746,29 @@ func testLiveLogin(ctx context.Context, outdir string) error { mach, err := builder.Exec() if err != nil { - return errors.Wrapf(err, "running iso") + return 0, errors.Wrapf(err, "running iso") } defer mach.Destroy() return awaitCompletion(ctx, mach, outdir, completionChannel, nil, []string{"coreos-liveiso-success"}) } -func testAsDisk(ctx context.Context, outdir string) error { +func testAsDisk(ctx context.Context, outdir string) (time.Duration, error) { builddir := kola.CosaBuild.Dir isopath := filepath.Join(builddir, kola.CosaBuild.Meta.BuildArtifacts.LiveIso.Path) builder, config, err := newQemuBuilder(outdir) if err != nil { - return nil + return 0, err } defer builder.Close() // Drop the bootindex bit (applicable to all arches except s390x and ppc64le); we want it to be the default if err := builder.AddIso(isopath, "", true); err != nil { - return err + return 0, err } completionChannel, err := builder.VirtioChannelRead("testisocompletion") if err != nil { - return err + return 0, err } config.AddSystemdUnit("live-signal-ok.service", liveSignalOKUnit, conf.Enable) @@ -731,7 +777,7 @@ func testAsDisk(ctx context.Context, outdir string) error { mach, err := builder.Exec() if err != nil { - return errors.Wrapf(err, "running iso") + return 0, errors.Wrapf(err, "running iso") } defer mach.Destroy() diff --git a/mantle/cmd/kolet/kolet.go b/mantle/cmd/kolet/kolet.go 
index 49e1128d..638ebca3 100644 --- a/mantle/cmd/kolet/kolet.go +++ b/mantle/cmd/kolet/kolet.go @@ -15,6 +15,7 @@ package main import ( + "context" "encoding/json" "fmt" "io/ioutil" @@ -96,14 +97,14 @@ const ( autopkgTestRebootPath = "/tmp/autopkgtest-reboot" autopkgtestRebootScript = `#!/bin/bash set -xeuo pipefail -~core/kolet reboot-request "$1" +~nest/kolet reboot-request "$1" reboot ` autopkgTestRebootPreparePath = "/tmp/autopkgtest-reboot-prepare" autopkgtestRebootPrepareScript = `#!/bin/bash set -euo pipefail -exec ~core/kolet reboot-request "$1" +exec ~nest/kolet reboot-request "$1" ` // File used to communicate between the script and the kolet runner internally @@ -190,8 +191,8 @@ func registerTestMap(m map[string]*register.Test) { // dispatchRunExtUnit returns true if unit completed successfully, false if // it's still running (or unit was terminated by SIGTERM) -func dispatchRunExtUnit(unitname string, sdconn *systemddbus.Conn) (bool, error) { - props, err := sdconn.GetAllProperties(unitname) +func dispatchRunExtUnit(ctx context.Context, unitname string, sdconn *systemddbus.Conn) (bool, error) { + props, err := sdconn.GetAllPropertiesContext(ctx, unitname) if err != nil { return false, errors.Wrapf(err, "listing unit properties") } @@ -206,7 +207,7 @@ func dispatchRunExtUnit(unitname string, sdconn *systemddbus.Conn) (bool, error) switch state { case "inactive": - _, err := sdconn.StartUnit(unitname, "fail", nil) + _, err := sdconn.StartUnitContext(ctx, unitname, "fail", nil) return false, err case "activating": return false, nil @@ -294,19 +295,20 @@ func runExtUnit(cmd *cobra.Command, args []string) error { }() } + ctx := context.Background() unitname := args[0] // Restrict this to services, don't need to support anything else right now if !strings.HasSuffix(unitname, ".service") { unitname = unitname + ".service" } - sdconn, err := systemddbus.NewSystemConnection() + sdconn, err := systemddbus.NewSystemConnectionContext(ctx) if err != nil { return 
errors.Wrapf(err, "systemd connection") } // Start the unit; it's not started by default because we need to // do some preparatory work above (and some is done in the harness) - if _, err := sdconn.StartUnit(unitname, "fail", nil); err != nil { + if _, err := sdconn.StartUnitContext(ctx, unitname, "fail", nil); err != nil { return errors.Wrapf(err, "starting unit") } @@ -314,7 +316,7 @@ func runExtUnit(cmd *cobra.Command, args []string) error { return err } // Check the status now to avoid any race conditions - _, err = dispatchRunExtUnit(unitname, sdconn) + _, err = dispatchRunExtUnit(ctx, unitname, sdconn) if err != nil { return err } @@ -336,7 +338,7 @@ func runExtUnit(cmd *cobra.Command, args []string) error { for n := range m { if n == unitname { systemdjournal.Print(systemdjournal.PriInfo, "Dispatching %s", n) - r, err := dispatchRunExtUnit(unitname, sdconn) + r, err := dispatchRunExtUnit(ctx, unitname, sdconn) systemdjournal.Print(systemdjournal.PriInfo, "Done dispatching %s", n) if err != nil { return err diff --git a/mantle/cmd/ore/aliyun/visibility.go b/mantle/cmd/ore/aliyun/visibility.go index 097b3df6..6b6372cf 100644 --- a/mantle/cmd/ore/aliyun/visibility.go +++ b/mantle/cmd/ore/aliyun/visibility.go @@ -64,7 +64,7 @@ func changeVisibility(cmd *cobra.Command, args []string) error { } for _, pair := range args { - if strings.Contains(pair, ":") == false { + if !strings.Contains(pair, ":") { return fmt.Errorf("Argument isn't a valid region:image pair: %v\n", pair) } diff --git a/mantle/cmd/ore/aws/upload.go b/mantle/cmd/ore/aws/upload.go index 28c87abf..5497bf08 100644 --- a/mantle/cmd/ore/aws/upload.go +++ b/mantle/cmd/ore/aws/upload.go @@ -33,7 +33,7 @@ var ( Short: "Create AWS images", Long: `Upload CoreOS image to S3 and create an AMI. -Supported source formats are VMDK (as created with ./image_to_vm --format=ami_vmdk) and RAW. +Supported source formats are VMDK (as created with cosa buildextend-aws) and RAW. 
After a successful run, the final line of output will be a line of JSON describing the relevant resources. `, @@ -60,6 +60,7 @@ After a successful run, the final line of output will be a line of JSON describi uploadObjectFormat aws.EC2ImageFormat uploadAMIName string uploadAMIDescription string + uploadPublic bool uploadGrantUsers []string uploadGrantUsersSnapshot []string uploadTags []string @@ -80,6 +81,7 @@ func init() { cmdUpload.Flags().Var(&uploadObjectFormat, "object-format", fmt.Sprintf("object format: %s or %s (default: %s)", aws.EC2ImageFormatVmdk, aws.EC2ImageFormatRaw, aws.EC2ImageFormatVmdk)) cmdUpload.Flags().StringVar(&uploadAMIName, "ami-name", "", "name of the AMI to create") cmdUpload.Flags().StringVar(&uploadAMIDescription, "ami-description", "", "description of the AMI to create (default: empty)") + cmdUpload.Flags().BoolVar(&uploadPublic, "public", false, "make image and snapshot volume public") cmdUpload.Flags().StringSliceVar(&uploadGrantUsers, "grant-user", []string{}, "grant launch permission to this AWS user ID") cmdUpload.Flags().StringSliceVar(&uploadGrantUsersSnapshot, "grant-user-snapshot", []string{}, "grant snapshot volume permission to this AWS user ID") cmdUpload.Flags().StringSliceVar(&uploadTags, "tags", []string{}, "list of key=value tags to attach to the AMI") @@ -264,6 +266,15 @@ func runUpload(cmd *cobra.Command, args []string) error { } } + // make the image/snaphsot public if requested + if uploadPublic { + err = API.PublishImage(amiID) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to make image/snapshot public: %v\n", err) + os.Exit(1) + } + } + tagMap := make(map[string]string) for _, tag := range uploadTags { splitTag := strings.SplitN(tag, "=", 2) diff --git a/mantle/cmd/ore/azure/create-image.go b/mantle/cmd/ore/azure/create-image.go index bfc5b6ca..f2660db3 100644 --- a/mantle/cmd/ore/azure/create-image.go +++ b/mantle/cmd/ore/azure/create-image.go @@ -1,4 +1,5 @@ -// Copyright 2016 CoreOS, Inc. 
+// Copyright 2022 Red Hat +// Copyright 2018 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,49 +16,63 @@ package azure import ( - "time" + "encoding/json" + "fmt" + "os" "github.com/spf13/cobra" - - "github.com/coreos/mantle/platform/api/azure" ) var ( cmdCreateImage = &cobra.Command{ - Use: "create-image", - Short: "Create Azure image", - Long: "Create Azure image from a local VHD file", - RunE: runCreateImage, + Use: "create-image", + Short: "Create Azure image", + Long: "Create Azure image from a blob url", + RunE: runCreateImage, + Aliases: []string{"create-image-arm"}, SilenceUsage: true, } - // create image options - md azure.OSImage + imageName string + blobUrl string + resourceGroup string ) -func today() string { - return time.Now().Format("2006-01-02") -} - func init() { sv := cmdCreateImage.Flags().StringVar - sv(&md.Name, "name", "", "image name") - sv(&md.Label, "label", "", "image label") - sv(&md.Description, "description", "", "image description") - sv(&md.MediaLink, "blob", "", "source blob url") - sv(&md.ImageFamily, "family", "", "image family") - sv(&md.PublishedDate, "published-date", today(), "image published date, parsed as RFC3339") - sv(&md.RecommendedVMSize, "recommended-vm-size", "Medium", "recommended VM size") - sv(&md.IconURI, "icon-uri", "coreos-globe-color-lg-100px.png", "icon URI") - sv(&md.SmallIconURI, "small-icon-uri", "coreos-globe-color-lg-45px.png", "small icon URI") + sv(&imageName, "image-name", "", "image name") + sv(&blobUrl, "image-blob", "", "source blob url") + sv(&resourceGroup, "resource-group", "kola", "resource group name") Azure.AddCommand(cmdCreateImage) } func runCreateImage(cmd *cobra.Command, args []string) error { - md.Category = "Public" - md.OS = "Linux" - return api.AddOSImage(&md) + if err := api.SetupClients(); err != nil { + fmt.Fprintf(os.Stderr, "setting up clients: %v\n", err) + os.Exit(1) + 
} + img, err := api.CreateImage(imageName, resourceGroup, blobUrl) + if err != nil { + fmt.Fprintf(os.Stderr, "Couldn't create image: %v\n", err) + os.Exit(1) + } + if img.ID == nil { + fmt.Fprintf(os.Stderr, "received nil image\n") + os.Exit(1) + } + err = json.NewEncoder(os.Stdout).Encode(&struct { + ID *string + Location *string + }{ + ID: img.ID, + Location: img.Location, + }) + if err != nil { + fmt.Fprintf(os.Stderr, "Couldn't encode result: %v\n", err) + os.Exit(1) + } + return nil } diff --git a/mantle/cmd/ore/azure/create-image-arm.go b/mantle/cmd/ore/azure/delete-blob.go similarity index 36% rename from mantle/cmd/ore/azure/create-image-arm.go rename to mantle/cmd/ore/azure/delete-blob.go index 0c137879..ae8e391f 100644 --- a/mantle/cmd/ore/azure/create-image-arm.go +++ b/mantle/cmd/ore/azure/delete-blob.go @@ -1,4 +1,4 @@ -// Copyright 2018 CoreOS, Inc. +// Copyright 2022 Red Hat // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -14,63 +14,62 @@
 
 package azure
 
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-
-	"github.com/spf13/cobra"
-)
+import "github.com/spf13/cobra"
 
 var (
-	cmdCreateImageARM = &cobra.Command{
-		Use:   "create-image-arm",
-		Short: "Create Azure image",
-		Long:  "Create Azure image from a blob url",
-		RunE:  runCreateImageARM,
-
-		SilenceUsage: true,
+	cmdDeleteBlob = &cobra.Command{
+		Use:   "delete-blob",
+		Short: "Delete a blob from Azure storage",
+		Run:   runDeleteBlob,
 	}
 
-	imageName     string
-	blobUrl       string
-	resourceGroup string
+	// delete blob options
+	dbo struct {
+		storageacct string
+		container   string
+		blob        string
+	}
 )
 
 func init() {
-	sv := cmdCreateImageARM.Flags().StringVar
+	sv := cmdDeleteBlob.Flags().StringVar
 
-	sv(&imageName, "image-name", "", "image name")
-	sv(&blobUrl, "image-blob", "", "source blob url")
-	sv(&resourceGroup, "resource-group", "kola", "resource group name")
+	sv(&dbo.storageacct, "storage-account", "kola", "storage account name")
+	sv(&dbo.container, "container", "vhds", "container name")
+	sv(&dbo.blob, "blob-name", "", "name of the blob")
+	sv(&resourceGroup, "resource-group", "kola", "resource group name that owns the storage account")
 
-	Azure.AddCommand(cmdCreateImageARM)
+	Azure.AddCommand(cmdDeleteBlob)
 }
 
-func runCreateImageARM(cmd *cobra.Command, args []string) error {
+func runDeleteBlob(cmd *cobra.Command, args []string) {
+	if err := api.SetupClients(); err != nil {
-		fmt.Fprintf(os.Stderr, "setting up clients: %v\n", err)
-		os.Exit(1)
+		plog.Fatalf("setting up clients: %v\n", err)
 	}
-	img, err := api.CreateImage(imageName, resourceGroup, blobUrl)
+
+	kr, err := api.GetStorageServiceKeys(dbo.storageacct, resourceGroup)
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "Couldn't create image: %v\n", err)
-		os.Exit(1)
+		plog.Fatalf("Fetching storage service keys failed: %v", err)
 	}
-	if img.ID == nil {
-		fmt.Fprintf(os.Stderr, "received nil image\n")
-		os.Exit(1)
+
+	if kr.Keys == nil || len(*kr.Keys) == 0 {
+		plog.Fatalf("No storage service 
keys found") } - err = json.NewEncoder(os.Stdout).Encode(&struct { - ID *string - Location *string - }{ - ID: img.ID, - Location: img.Location, - }) + + k := (*kr.Keys)[0] + exists, err := api.BlobExists(dbo.storageacct, *k.Value, dbo.container, dbo.blob) if err != nil { - fmt.Fprintf(os.Stderr, "Couldn't encode result: %v\n", err) - os.Exit(1) + plog.Fatalf("Checking if blob exists failed: %v", err) + } + + if !exists { + plog.Infof("Blob doesn't exist. No need to delete.") + } else { + plog.Infof("Deleting blob.") + err = api.DeleteBlob(dbo.storageacct, *k.Value, dbo.container, dbo.blob) + if err != nil { + plog.Fatalf("Deleting blob failed: %v", err) + } } - return nil } diff --git a/mantle/cmd/ore/azure/delete-image.go b/mantle/cmd/ore/azure/delete-image.go index 6cbd3ac2..46f6b11c 100644 --- a/mantle/cmd/ore/azure/delete-image.go +++ b/mantle/cmd/ore/azure/delete-image.go @@ -1,4 +1,4 @@ -// Copyright 2019 Red Hat +// Copyright 2022 Red Hat // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,10 +23,11 @@ import ( var ( cmdDeleteImage = &cobra.Command{ - Use: "delete-image-arm", - Short: "Delete Azure image", - Long: "Remove an image from Azure.", - RunE: runDeleteImage, + Use: "delete-image", + Short: "Delete Azure image", + Long: "Remove an image from Azure.", + RunE: runDeleteImage, + Aliases: []string{"delete-image-arm"}, SilenceUsage: true, } diff --git a/mantle/cmd/ore/azure/replicate-image.go b/mantle/cmd/ore/azure/replicate-image.go deleted file mode 100644 index 9f32fe71..00000000 --- a/mantle/cmd/ore/azure/replicate-image.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "fmt" - - "github.com/coreos/go-semver/semver" - "github.com/spf13/cobra" -) - -var ( - cmdReplicateImage = &cobra.Command{ - Use: "replicate-image image", - Short: "Replicate an OS image in Azure", - RunE: runReplicateImage, - - SilenceUsage: true, - } - - defaultRegions = []string{ - "East US", - "West US", - "South Central US", - "Central US", - "North Central US", - "East US 2", - "North Europe", - "West Europe", - "Southeast Asia", - "East Asia", - "Japan West", - "Japan East", - "Brazil South", - "Australia Southeast", - "Australia East", - "Central India", - "South India", - "West India", - "Canada Central", - "Canada East", - "UK North", - "UK South 2", - "West US 2", - "West Central US", - "UK West", - "UK South", - "Central US EUAP", - "East US 2 EUAP", - } - - // replicate image options - rio struct { - offer string - sku string - version string - regions []string - } -) - -func init() { - sv := cmdReplicateImage.Flags().StringVar - - sv(&rio.offer, "offer", "CoreOS", "Azure image product name") - sv(&rio.sku, "sku", "", "Azure image SKU (stable, beta, alpha for CoreOS)") - sv(&rio.version, "version", "", "Azure image version") - - cmdReplicateImage.Flags().StringSliceVar(&rio.regions, "region", defaultRegions, - "Azure regions to replicate to") - - Azure.AddCommand(cmdReplicateImage) -} - -func runReplicateImage(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return fmt.Errorf("expecting 1 argument, got %d", len(args)) - } - - if rio.offer == "" { - return fmt.Errorf("offer name is 
required") - } - - if rio.sku == "" { - return fmt.Errorf("sku is required") - } - - if rio.version == "" { - return fmt.Errorf("version is required") - } - - _, err := semver.NewVersion(rio.version) - if err != nil { - return fmt.Errorf("version is not valid semver: %v", err) - } - - return api.ReplicateImage(args[0], rio.offer, rio.sku, rio.version, rio.regions...) -} diff --git a/mantle/cmd/ore/azure/upload-blob-arm.go b/mantle/cmd/ore/azure/upload-blob-arm.go deleted file mode 100644 index 1223ced8..00000000 --- a/mantle/cmd/ore/azure/upload-blob-arm.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package azure - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/Microsoft/azure-vhd-utils/vhdcore/validator" - "github.com/spf13/cobra" - - "github.com/coreos/mantle/sdk" -) - -var ( - cmdUploadBlobARM = &cobra.Command{ - Use: "upload-blob-arm", - Short: "Upload a blob to Azure storage", - Run: runUploadBlobARM, - } -) - -func init() { - bv := cmdUploadBlobARM.Flags().BoolVar - sv := cmdUploadBlobARM.Flags().StringVar - - bv(&ubo.overwrite, "overwrite", false, "overwrite blob") - bv(&ubo.validate, "validate", true, "validate blob as VHD file") - - sv(&ubo.storageacct, "storage-account", "kola", "storage account name") - sv(&ubo.container, "container", "vhds", "container name") - sv(&ubo.blob, "blob-name", "", "name of the blob") - sv(&ubo.vhd, "file", defaultUploadFile(), "path to CoreOS image (build with ./image_to_vm.sh --format=azure ...)") - sv(&resourceGroup, "resource-group", "kola", "resource group name that owns the storage account") - - Azure.AddCommand(cmdUploadBlobARM) -} - -func defaultUploadFile() string { - build := sdk.BuildRoot() - return build + "/images/amd64-usr/latest/coreos_production_azure_image.vhd" -} - -func runUploadBlobARM(cmd *cobra.Command, args []string) { - if ubo.blob == "" { - ver, err := sdk.VersionsFromDir(filepath.Dir(ubo.vhd)) - if err != nil { - plog.Fatalf("Unable to get version from iamge directory, provide a -blob-name flag or include a version.txt in the image directory: %v\n", err) - } - ubo.blob = fmt.Sprintf("Container-Linux-dev-%s-%s.vhd", os.Getenv("USER"), ver.Version) - } - - if err := api.SetupClients(); err != nil { - plog.Fatalf("setting up clients: %v\n", err) - } - - if ubo.validate { - plog.Printf("Validating VHD %q", ubo.vhd) - if !strings.HasSuffix(strings.ToLower(ubo.blob), ".vhd") { - plog.Fatalf("Blob name should end with .vhd") - } - - if !strings.HasSuffix(strings.ToLower(ubo.vhd), ".vhd") { - plog.Fatalf("Image should end with .vhd") - } - - if err := 
validator.ValidateVhd(ubo.vhd); err != nil { - plog.Fatal(err) - } - - if err := validator.ValidateVhdSize(ubo.vhd); err != nil { - plog.Fatal(err) - } - } - - kr, err := api.GetStorageServiceKeysARM(ubo.storageacct, resourceGroup) - if err != nil { - plog.Fatalf("Fetching storage service keys failed: %v", err) - } - - if kr.Keys == nil || len(*kr.Keys) == 0 { - plog.Fatalf("No storage service keys found") - } - - //only use the first service key to avoid uploading twice - //see https://github.com/coreos/coreos-assembler/pull/1849 - k := (*kr.Keys)[0] - if err := api.UploadBlob(ubo.storageacct, *k.Value, ubo.vhd, ubo.container, ubo.blob, ubo.overwrite); err != nil { - plog.Fatalf("Uploading blob failed: %v", err) - } - - err = json.NewEncoder(os.Stdout).Encode(&struct { - URL string - }{ - URL: fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", ubo.storageacct, ubo.container, ubo.blob), - }) - - if err != nil { - plog.Fatal(err) - } -} diff --git a/mantle/cmd/ore/azure/upload-blob.go b/mantle/cmd/ore/azure/upload-blob.go index 3ba5893b..1d211a41 100644 --- a/mantle/cmd/ore/azure/upload-blob.go +++ b/mantle/cmd/ore/azure/upload-blob.go @@ -1,4 +1,5 @@ -// Copyright 2016 CoreOS, Inc. +// Copyright 2022 Red Hat +// Copyright 2018 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -15,7 +16,9 @@ package azure import ( + "encoding/json" "fmt" + "os" "strings" "github.com/Microsoft/azure-vhd-utils/vhdcore/validator" @@ -24,9 +27,10 @@ import ( var ( cmdUploadBlob = &cobra.Command{ - Use: "upload-blob storage-account container blob-name file", - Short: "Upload a blob to Azure storage", - Run: runUploadBlob, + Use: "upload-blob", + Short: "Upload a blob to Azure storage", + Run: runUploadBlob, + Aliases: []string{"upload-blob-arm"}, } // upload blob options @@ -42,22 +46,31 @@ var ( func init() { bv := cmdUploadBlob.Flags().BoolVar + sv := cmdUploadBlob.Flags().StringVar bv(&ubo.overwrite, "overwrite", false, "overwrite blob") bv(&ubo.validate, "validate", true, "validate blob as VHD file") + sv(&ubo.storageacct, "storage-account", "kola", "storage account name") + sv(&ubo.container, "container", "vhds", "container name") + sv(&ubo.blob, "blob-name", "", "name of the blob") + sv(&ubo.vhd, "file", "", "path to CoreOS VHD image") + sv(&resourceGroup, "resource-group", "kola", "resource group name that owns the storage account") + Azure.AddCommand(cmdUploadBlob) } func runUploadBlob(cmd *cobra.Command, args []string) { - if len(args) != 4 { - plog.Fatalf("Expecting 4 arguments, got %d", len(args)) + if ubo.vhd == "" { + plog.Fatal("--file is required") + } + if ubo.blob == "" { + plog.Fatal("--blob-name is required") } - ubo.storageacct = args[0] - ubo.container = args[1] - ubo.blob = args[2] - ubo.vhd = args[3] + if err := api.SetupClients(); err != nil { + plog.Fatalf("setting up clients: %v\n", err) + } if ubo.validate { plog.Printf("Validating VHD %q", ubo.vhd) @@ -65,6 +78,10 @@ func runUploadBlob(cmd *cobra.Command, args []string) { plog.Fatalf("Blob name should end with .vhd") } + if !strings.HasSuffix(strings.ToLower(ubo.vhd), ".vhd") { + plog.Fatalf("Image should end with .vhd") + } + if err := validator.ValidateVhd(ubo.vhd); err != nil { plog.Fatal(err) } @@ -74,16 +91,29 @@ func runUploadBlob(cmd *cobra.Command, args []string) { } } - 
kr, err := api.GetStorageServiceKeys(ubo.storageacct) + kr, err := api.GetStorageServiceKeys(ubo.storageacct, resourceGroup) if err != nil { plog.Fatalf("Fetching storage service keys failed: %v", err) } - if err := api.UploadBlob(ubo.storageacct, kr.PrimaryKey, ubo.vhd, ubo.container, ubo.blob, ubo.overwrite); err != nil { + if kr.Keys == nil || len(*kr.Keys) == 0 { + plog.Fatalf("No storage service keys found") + } + + //only use the first service key to avoid uploading twice + //see https://github.com/coreos/coreos-assembler/pull/1849 + k := (*kr.Keys)[0] + if err := api.UploadBlob(ubo.storageacct, *k.Value, ubo.vhd, ubo.container, ubo.blob, ubo.overwrite); err != nil { plog.Fatalf("Uploading blob failed: %v", err) } - uri := fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", ubo.storageacct, ubo.container, ubo.blob) + err = json.NewEncoder(os.Stdout).Encode(&struct { + URL string + }{ + URL: fmt.Sprintf("https://%s.blob.core.windows.net/%s/%s", ubo.storageacct, ubo.container, ubo.blob), + }) - plog.Printf("Blob uploaded to %q", uri) + if err != nil { + plog.Fatal(err) + } } diff --git a/mantle/cmd/ore/esx/create-base.go b/mantle/cmd/ore/esx/create-base.go index 7301f150..4fba34ff 100644 --- a/mantle/cmd/ore/esx/create-base.go +++ b/mantle/cmd/ore/esx/create-base.go @@ -17,9 +17,7 @@ package esx import ( "fmt" "os" - "path/filepath" - "github.com/coreos/mantle/sdk" "github.com/spf13/cobra" ) @@ -42,24 +40,21 @@ After a successful run, the final line of output will be the name of the VM crea func init() { ESX.AddCommand(cmdCreateBase) - cmdCreateBase.Flags().StringVar(&ovaPath, "file", - sdk.BuildRoot()+"/images/amd64-usr/latest/coreos_production_vmware_ova.ova", - "path to CoreOS image (build with: ./image_to_vm.sh --format=vmware_ova ...)") + cmdCreateBase.Flags().StringVar(&ovaPath, "file", "", "path to VMware OVA image") cmdCreateBase.Flags().StringVar(&baseVMName, "name", "", "name of base VM") } func runBaseCreate(cmd *cobra.Command, args []string) error 
{ - vmName := baseVMName - if vmName == "" { - ver, err := sdk.VersionsFromDir(filepath.Dir(ovaPath)) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to get version from image directory, provide a -name flag or include a version.txt in the image directory: %v\n", err) - os.Exit(1) - } - vmName = ver.Version + if ovaPath == "" { + fmt.Fprintf(os.Stderr, "--file is required\n") + os.Exit(1) + } + if baseVMName == "" { + fmt.Fprintf(os.Stderr, "--name is required\n") + os.Exit(1) } - err := API.CreateBaseDevice(vmName, ovaPath) + err := API.CreateBaseDevice(baseVMName, ovaPath) if err != nil { fmt.Fprintf(os.Stderr, "Couldn't create base VM: %v\n", err) os.Exit(1) diff --git a/mantle/cmd/ore/gcloud/gcloud.go b/mantle/cmd/ore/gcloud/gcloud.go index b041636a..d439fb20 100644 --- a/mantle/cmd/ore/gcloud/gcloud.go +++ b/mantle/cmd/ore/gcloud/gcloud.go @@ -40,7 +40,7 @@ func init() { sv := GCloud.PersistentFlags().StringVar sv(&opts.Image, "image", "", "image name") - sv(&opts.Project, "project", "coreos-gce-testing", "project") + sv(&opts.Project, "project", "fedora-coreos-devel", "project") sv(&opts.Zone, "zone", "us-central1-a", "zone") sv(&opts.MachineType, "machinetype", "n1-standard-1", "machine type") sv(&opts.DiskType, "disktype", "pd-ssd", "disk type") diff --git a/mantle/cmd/ore/gcloud/index.go b/mantle/cmd/ore/gcloud/index.go deleted file mode 100644 index cb174d1c..00000000 --- a/mantle/cmd/ore/gcloud/index.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2014 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package gcloud - -import ( - "fmt" - "net/http" - "os" - - "github.com/spf13/cobra" - "golang.org/x/net/context" - - "github.com/coreos/mantle/auth" - "github.com/coreos/mantle/storage" - "github.com/coreos/mantle/storage/index" -) - -var ( - indexDryRun bool - indexForce bool - indexDelete bool - indexDirs bool - indexRecursive bool - indexTitle string - cmdIndex = &cobra.Command{ - Use: "index [options] gs://bucket/prefix/ [gs://...]", - Short: "Update HTML indexes", - Run: runIndex, - Long: `Update HTML indexes for Google Storage. - -Scan a given Google Storage location and generate "index.html" under -every directory prefix. If the --directories option is given then -objects matching the directory prefixes are also created. For example, -the pages generated for a bucket containing only "dir/obj": - - index.html - a HTML index page listing dir - dir/index.html - a HTML index page listing obj - dir/ - an identical HTML index page - dir - a redirect page to dir/ - -Do not enable --directories if you expect to be able to copy the tree to -a local filesystem, the fake directories will conflict with the real ones!`, - } -) - -func init() { - cmdIndex.Flags().BoolVarP(&indexDryRun, - "dry-run", "n", false, - "perform a trial run with no changes") - cmdIndex.Flags().BoolVarP(&indexForce, - "force", "f", false, - "overwrite objects even if they appear up to date") - cmdIndex.Flags().BoolVar(&indexDelete, - "delete", false, "delete index objects") - cmdIndex.Flags().BoolVarP(&indexRecursive, "recursive", "r", false, - "update nested prefixes") - cmdIndex.Flags().BoolVarP(&indexDirs, - "directories", "D", false, - "use objects to mimic a directory tree") - cmdIndex.Flags().StringVarP(&indexTitle, "html-title", "T", "", - "use the given title instead of bucket name in index pages") - GCloud.AddCommand(cmdIndex) -} - -func runIndex(cmd *cobra.Command, args []string) 
{ - if len(args) == 0 { - fmt.Fprintf(os.Stderr, "No URLs specified\n") - os.Exit(2) - } - - ctx := context.Background() - client, err := auth.GoogleClient() - if err != nil { - fmt.Fprintf(os.Stderr, "Authentication failed: %v\n", err) - os.Exit(1) - } - - for _, url := range args { - if err := updateTree(ctx, client, url); err != nil { - fmt.Fprintf(os.Stderr, "Failed: %v\n", err) - os.Exit(1) - } - } - - if indexDryRun { - fmt.Printf("Dry-run successful!\n") - } else { - fmt.Printf("Update successful!\n") - } -} - -func updateTree(ctx context.Context, client *http.Client, url string) error { - root, err := storage.NewBucket(client, url) - if err != nil { - return err - } - root.WriteDryRun(indexDryRun) - root.WriteAlways(indexForce) - - if err = root.FetchPrefix(ctx, root.Prefix(), indexRecursive); err != nil { - return err - } - - job := index.IndexJob{Bucket: root} - job.DirectoryHTML(indexDirs) - job.IndexHTML(true) - job.Delete(indexDelete) - job.Recursive(indexRecursive) - if indexTitle != "" { - job.Name(indexTitle) - } - return job.Do(ctx) -} diff --git a/mantle/cmd/ore/gcloud/promote-image.go b/mantle/cmd/ore/gcloud/promote-image.go index c6137b53..de80221f 100644 --- a/mantle/cmd/ore/gcloud/promote-image.go +++ b/mantle/cmd/ore/gcloud/promote-image.go @@ -92,9 +92,12 @@ func runPromoteImage(cmd *cobra.Command, args []string) { continue } // Some debug messages which are useful when needed. - //nolint // Add nolint to pass golang-ci lint, - // drop it when the error is gone, it's already fixed - // see https://github.com/googleapis/google-api-go-client/issues/767 + // This triggers the deprecation lint in golangci-lint because the + // docstring for the `Deprecated` field starts with "Deprecated: ". The + // docstring was tweaked to not trigger this, so we can drop this in the + // next vendor bump. See: + // https://github.com/googleapis/google-api-go-client/issues/767. 
+ // nolint if image.Deprecated != nil { plog.Debugf("Deprecation state for %v is %v", image.Name, image.Deprecated.State) @@ -104,6 +107,7 @@ func runPromoteImage(cmd *cobra.Command, args []string) { // Perform the deprecation if the image is not already deprecated. // We detect if it is active by checking if it either doesn't // have any deprecation state or if it is explicitly ACTIVE. + // nolint (see comment above) if image.Deprecated == nil || image.Deprecated.State == string(gcloud.DeprecationStateActive) { deprecateImage( diff --git a/mantle/cmd/ore/gcloud/sync.go b/mantle/cmd/ore/gcloud/sync.go deleted file mode 100644 index 1088a166..00000000 --- a/mantle/cmd/ore/gcloud/sync.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package gcloud - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "golang.org/x/net/context" - - "github.com/coreos/mantle/lang/worker" - "github.com/coreos/mantle/storage" - "github.com/coreos/mantle/storage/index" -) - -var ( - syncDryRun bool - syncForce bool - syncDelete bool - syncRecursive bool - syncIndexDirs bool - syncIndexPages bool - syncIndexTitle string - cmdSync = &cobra.Command{ - Use: "sync gs://src/foo gs://dst/bar", - Short: "Copy objects between GS buckets", - Run: runSync, - } -) - -func init() { - cmdSync.Flags().BoolVarP(&syncDryRun, "dry-run", "n", false, - "perform a trial run, do not make changes") - cmdSync.Flags().BoolVarP(&syncForce, "force", "f", false, - "write everything, even when already up-to-date") - cmdSync.Flags().BoolVar(&syncDelete, "delete", false, - "delete extra objects and indexes") - cmdSync.Flags().BoolVarP(&syncRecursive, "recursive", "r", false, - "sync nested prefixes") - cmdSync.Flags().BoolVarP(&syncIndexDirs, "index-dirs", "D", false, - "generate HTML pages to mimic a directory tree") - cmdSync.Flags().BoolVarP(&syncIndexPages, "index-html", "I", false, - "generate index.html pages for each directory") - cmdSync.Flags().StringVarP(&syncIndexTitle, "html-title", "T", "", - "use the given title instead of bucket name in index pages") - GCloud.AddCommand(cmdSync) -} - -func runSync(cmd *cobra.Command, args []string) { - if len(args) != 2 { - fmt.Fprintf(os.Stderr, "Expected exactly two gs:// URLs. 
Got: %v\n", args) - os.Exit(2) - } - - ctx := context.Background() - src, err := storage.NewBucket(api.Client(), args[0]) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } - src.WriteDryRun(true) // do not write to src - - dst, err := storage.NewBucket(api.Client(), args[1]) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } - dst.WriteDryRun(syncDryRun) - dst.WriteAlways(syncForce) - - err = worker.Parallel(ctx, - func(c context.Context) error { - return src.FetchPrefix(c, src.Prefix(), syncRecursive) - }, - func(c context.Context) error { - return dst.FetchPrefix(c, dst.Prefix(), syncRecursive) - }) - if err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } - - job := index.NewSyncIndexJob(src, dst) - job.DirectoryHTML(syncIndexDirs) - job.IndexHTML(syncIndexPages) - job.Delete(syncDelete) - job.Recursive(syncRecursive) - if syncIndexTitle != "" { - job.Name(syncIndexTitle) - } - if err := job.Do(ctx); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} diff --git a/mantle/cmd/ore/ibmcloud/copy-object.go b/mantle/cmd/ore/ibmcloud/copy-object.go index a188ab0a..11867e26 100644 --- a/mantle/cmd/ore/ibmcloud/copy-object.go +++ b/mantle/cmd/ore/ibmcloud/copy-object.go @@ -46,11 +46,17 @@ func init() { cmdCopyObject.Flags().StringVar(©CloudObjectStorage, "cloud-object-storage", "coreos-dev-image-ibmcloud", "cloud object storage to be used") cmdCopyObject.Flags().StringVar(&sourceBucket, "source-bucket", "coreos-dev-image-ibmcloud-us-east", "bucket where object needs to be copied from") cmdCopyObject.Flags().StringVar(&sourceName, "source-name", "", "name of object to be copied") - cmdCopyObject.MarkFlagRequired("source-name") + if err := cmdCopyObject.MarkFlagRequired("source-name"); err != nil { + panic(err) + } cmdCopyObject.Flags().StringVar(&destRegion, "destination-region", "", "region to be copied to") - cmdCopyObject.MarkFlagRequired("destination-region") + 
if err := cmdCopyObject.MarkFlagRequired("destination-region"); err != nil { + panic(err) + } cmdCopyObject.Flags().StringVar(&destBucket, "destination-bucket", "", "destination bucket to copy to") - cmdCopyObject.MarkFlagRequired("destination-bucket") + if err := cmdCopyObject.MarkFlagRequired("destination-bucket"); err != nil { + panic(err) + } } func runCopyObject(cmd *cobra.Command, args []string) error { diff --git a/mantle/cmd/ore/openstack/create.go b/mantle/cmd/ore/openstack/create.go index 1b4feb64..f8a6ab62 100644 --- a/mantle/cmd/ore/openstack/create.go +++ b/mantle/cmd/ore/openstack/create.go @@ -18,9 +18,9 @@ import ( "fmt" "os" - "github.com/coreos/mantle/sdk" - "github.com/coreos/mantle/system" "github.com/spf13/cobra" + + coreosarch "github.com/coreos/stream-metadata-go/arch" ) var ( @@ -45,16 +45,18 @@ After a successful run, the final line of output will be the ID of the image. func init() { OpenStack.AddCommand(cmdCreate) - cmdCreate.Flags().StringVar(&arch, "arch", system.RpmArch(), "The architecture of the image") - cmdCreate.Flags().StringVar(&path, "file", - sdk.BuildRoot()+"/images/amd64-usr/latest/coreos_production_openstack_image.img", - "path to CoreOS image (build with: ./image_to_vm.sh --format=openstack ...)") + cmdCreate.Flags().StringVar(&arch, "arch", coreosarch.CurrentRpmArch(), "The architecture of the image") + cmdCreate.Flags().StringVar(&path, "file", "", "path to OpenStack image") cmdCreate.Flags().StringVar(&name, "name", "", "image name") cmdCreate.Flags().StringVar(&visibility, "visibility", "private", "Image visibility within OpenStack") cmdCreate.Flags().BoolVar(&protected, "protected", false, "Image deletion protection") } func runCreate(cmd *cobra.Command, args []string) error { + if path == "" { + fmt.Fprintf(os.Stderr, "--file is required\n") + os.Exit(1) + } id, err := API.UploadImage(name, path, arch, visibility, protected) if err != nil { fmt.Fprintf(os.Stderr, "Couldn't create image: %v\n", err) diff --git 
a/mantle/cmd/plume/cosa2stream.go b/mantle/cmd/plume/cosa2stream.go index d0b39f74..b43f2afd 100644 --- a/mantle/cmd/plume/cosa2stream.go +++ b/mantle/cmd/plume/cosa2stream.go @@ -34,7 +34,7 @@ import ( const ( // This will hopefully migrate to mirror.openshift.com, see https://github.com/openshift/os/issues/477 - rhcosCosaEndpoint = "https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases" + rhcosCosaEndpoint = "https://rhcos.mirror.openshift.com/art/storage/releases" ) var ( diff --git a/mantle/cmd/plume/fcos.go b/mantle/cmd/plume/fcos.go deleted file mode 100644 index 3365eff6..00000000 --- a/mantle/cmd/plume/fcos.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2019 Red Hat Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "github.com/spf13/pflag" -) - -var ( - specBucket string - specRegion string - specProfile string - specPolicy string - specCommitId string - specArch string - specChannel string - specVersion string -) - -func AddFcosSpecFlags(flags *pflag.FlagSet) { - flags.StringVar(&specBucket, "bucket", "fcos-builds", "S3 bucket") - flags.StringVar(&specRegion, "region", "us-east-1", "S3 bucket region") - flags.StringVar(&specProfile, "profile", "default", "AWS profile") - flags.StringVar(&specPolicy, "policy", "public-read", "Canned ACL policy") -} - -func FcosValidateArguments() { - if specVersion == "" { - plog.Fatal("--version is required") - } - if specChannel == "" { - plog.Fatal("--channel is required") - } - if specBucket == "" { - plog.Fatal("--bucket is required") - } - if specRegion == "" { - plog.Fatal("--region is required") - } -} - -func FcosChannelSpec() fcosChannelSpec { - return fcosChannelSpec{ - Bucket: specBucket, - Profile: specProfile, - Region: specRegion, - } -} diff --git a/mantle/cmd/plume/fedora.go b/mantle/cmd/plume/fedora.go deleted file mode 100644 index 63bdaba2..00000000 --- a/mantle/cmd/plume/fedora.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2018 Red Hat Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "fmt" - - "github.com/spf13/pflag" -) - -var ( - specComposeID string - specEnv string - specRespin string - specImageType string - specTimestamp string - awsFedoraArches = []string{ - "x86_64", - "aarch64", - } - awsFedoraProdAccountPartitions = []awsPartitionSpec{ - awsPartitionSpec{ - Name: "AWS", - Profile: "default", - Bucket: "fedora-cloud-plume-ami-vmimport", - BucketRegion: "us-east-1", - LaunchPermissions: []string{ - "125523088429", // fedora production account - }, - Regions: []string{ - "ap-northeast-2", - "us-east-2", - "ap-southeast-1", - "ap-southeast-2", - "ap-south-1", - "eu-west-1", - "sa-east-1", - "us-east-1", - "us-west-2", - "us-west-1", - "eu-central-1", - "ap-northeast-1", - "ca-central-1", - "eu-west-2", - "eu-west-3", - }, - }, - } - awsFedoraDevAccountPartitions = []awsPartitionSpec{ - awsPartitionSpec{ - Name: "AWS", - Profile: "default", - Bucket: "prod-account-match-fedora-cloud-plume-ami-vmimport", - BucketRegion: "us-east-1", - LaunchPermissions: []string{ - "013116697141", // fedora community dev test account - }, - Regions: []string{ - "us-east-2", - "us-east-1", - }, - }, - } - - fedoraSpecs = map[string]channelSpec{ - "rawhide": channelSpec{ - BaseURL: "https://koji.fedoraproject.org/compose/rawhide", - Arches: awsFedoraArches, - AWS: awsSpec{ - BaseName: "Fedora", - BaseDescription: "Fedora Cloud Base AMI", - Image: "Fedora-{{.ImageType}}-{{.Version}}-{{.Timestamp}}.n.{{.Respin}}.{{.Arch}}.raw.xz", - Partitions: awsFedoraProdAccountPartitions, - }, - }, - "branched": channelSpec{ - BaseURL: "https://koji.fedoraproject.org/compose/branched", - Arches: awsFedoraArches, - AWS: awsSpec{ - BaseName: "Fedora", - BaseDescription: "Fedora Cloud Base AMI", - Image: "Fedora-{{.ImageType}}-{{.Version}}-{{.Timestamp}}.n.{{.Respin}}.{{.Arch}}.raw.xz", - Partitions: awsFedoraProdAccountPartitions, - }, - }, - "updates": channelSpec{ - BaseURL: "https://koji.fedoraproject.org/compose/updates", - Arches: 
awsFedoraArches, - AWS: awsSpec{ - BaseName: "Fedora", - BaseDescription: "Fedora Cloud Base AMI", - Image: "Fedora-{{.ImageType}}-{{.Version}}-{{.Timestamp}}.{{.Respin}}.{{.Arch}}.raw.xz", - Partitions: awsFedoraProdAccountPartitions, - }, - }, - "cloud": channelSpec{ - BaseURL: "https://koji.fedoraproject.org/compose/cloud", - Arches: awsFedoraArches, - AWS: awsSpec{ - BaseName: "Fedora", - BaseDescription: "Fedora Cloud Base AMI", - Image: "Fedora-{{.ImageType}}-{{.Version}}-{{.Timestamp}}.{{.Respin}}.{{.Arch}}.raw.xz", - Partitions: awsFedoraProdAccountPartitions, - }, - }, - } -) - -func AddFedoraSpecFlags(flags *pflag.FlagSet) { - flags.StringVar(&specEnv, "environment", "prod", "AMI upload environment") - flags.StringVar(&specImageType, "image-type", "Cloud-Base", "type of image") - flags.StringVar(&specTimestamp, "timestamp", "", "compose timestamp") - flags.StringVar(&specRespin, "respin", "0", "compose respin") - flags.StringVar(&specComposeID, "compose-id", "", "compose id") -} - -func ChannelFedoraSpec() (channelSpec, error) { - if specComposeID == "" { - plog.Fatal("--compose-id is required") - } - if specTimestamp == "" { - plog.Fatal("--timestamp is required") - } - if specVersion == "" { - plog.Fatal("--version is required") - } - if specArch == "" { - specArch = "x86_64" - } - - spec, ok := fedoraSpecs[specChannel] - if !ok { - return channelSpec{}, fmt.Errorf("Unknown channel: %q", specChannel) - } - - if specEnv == "dev" { - spec.AWS.Partitions = awsFedoraDevAccountPartitions - } - archOk := false - for _, arch := range spec.Arches { - if specArch == arch { - archOk = true - break - } - } - if !archOk { - plog.Fatalf("Unknown arch %q for channel %q", specArch, specChannel) - } - - return spec, nil -} diff --git a/mantle/cmd/plume/plume.go b/mantle/cmd/plume/plume.go index 3abc0971..ef8c95ec 100644 --- a/mantle/cmd/plume/plume.go +++ b/mantle/cmd/plume/plume.go @@ -27,14 +27,8 @@ var ( Use: "plume [command]", Short: "The CoreOS release utility", } 
- - gceJSONKeyFile string ) -func init() { - root.PersistentFlags().StringVar(&gceJSONKeyFile, "gce-json-key", "", "use a JSON key for authentication") -} - func main() { cli.Execute(root) } diff --git a/mantle/cmd/plume/prerelease.go b/mantle/cmd/plume/prerelease.go deleted file mode 100644 index b2937831..00000000 --- a/mantle/cmd/plume/prerelease.go +++ /dev/null @@ -1,569 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "bytes" - "errors" - "fmt" - "html/template" - "net/http" - "net/url" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/Azure/azure-sdk-for-go/management/storageservice" - "github.com/Microsoft/azure-vhd-utils/vhdcore/validator" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" - - "github.com/coreos/mantle/platform/api/aws" - "github.com/coreos/mantle/platform/api/azure" - "github.com/coreos/mantle/sdk" - "github.com/coreos/mantle/storage" - "github.com/coreos/mantle/system" - "github.com/coreos/mantle/util" -) - -var ( - cmdPreRelease = &cobra.Command{ - Use: "pre-release [options]", - Short: "Run pre-release steps for CoreOS", - Long: "Runs pre-release steps for CoreOS, such as image uploading and OS image creation, and replication across regions.", - RunE: runPreRelease, - - SilenceUsage: true, - } - - platforms = map[string]platform{ - "aws": platform{ - displayName: "AWS", - handler: awsPreRelease, - }, - 
"azure": platform{ - displayName: "Azure", - handler: azurePreRelease, - }, - } - platformList []string - - selectedPlatforms []string - selectedDistro string - azureProfile string - awsCredentialsFile string - verifyKeyFile string - imageInfoFile string -) - -type imageMetadataAbstract struct { - Env string - Version string - Timestamp string - Respin string - ImageType string - Arch string -} - -type platform struct { - displayName string - handler func(context.Context, *http.Client, *storage.Bucket, *channelSpec, *imageInfo) error -} - -type imageInfo struct { - AWS *amiList `json:"aws,omitempty"` - Azure *azureImageInfo `json:"azure,omitempty"` -} - -// Common switches between Fedora Cloud and Fedora CoreOS -func AddSpecFlags(flags *pflag.FlagSet) { - flags.StringVarP(&specArch, "arch", "A", system.RpmArch(), "target arch") - flags.StringVarP(&specChannel, "channel", "C", "testing", "target channel") - if err := flags.MarkDeprecated("channel", "use --stream instead"); err != nil { - panic(err) - } - flags.StringVarP(&specChannel, "stream", "S", "testing", "target stream") - flags.StringVarP(&specVersion, "version", "V", "", "release version") -} - -func init() { - for k := range platforms { - platformList = append(platformList, k) - } - sort.Strings(platformList) - - cmdPreRelease.Flags().StringSliceVar(&selectedPlatforms, "platform", platformList, "platform to pre-release") - cmdPreRelease.Flags().StringVar(&selectedDistro, "distro", "fedora", "system to pre-release") - cmdPreRelease.Flags().StringVar(&azureProfile, "azure-profile", "", "Azure Profile json file") - cmdPreRelease.Flags().StringVar(&awsCredentialsFile, "aws-credentials", "", "AWS credentials file") - cmdPreRelease.Flags().StringVar(&verifyKeyFile, - "verify-key", "", "path to ASCII-armored PGP public key to be used in verifying download signatures. 
Defaults to CoreOS Buildbot (0412 7D0B FABE C887 1FFB 2CCE 50E0 8855 93D2 DCB4)") - cmdPreRelease.Flags().StringVar(&imageInfoFile, "write-image-list", "", "optional output file describing uploaded images") - AddSpecFlags(cmdPreRelease.Flags()) - AddFedoraSpecFlags(cmdPreRelease.Flags()) - root.AddCommand(cmdPreRelease) -} - -func runPreRelease(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - return errors.New("no args accepted") - } - - for _, platformName := range selectedPlatforms { - if _, ok := platforms[platformName]; !ok { - return fmt.Errorf("Unknown platform %q", platformName) - } - } - - switch selectedDistro { - case "fedora": - if err := runFedoraPreRelease(cmd); err != nil { - return err - } - default: - return fmt.Errorf("Unknown distro %q", selectedDistro) - } - plog.Printf("Pre-release complete, run `plume release` to finish.") - - return nil -} - -func runFedoraPreRelease(cmd *cobra.Command) error { - spec, err := ChannelFedoraSpec() - if err != nil { - return err - } - ctx := context.Background() - client := http.Client{} - - var imageInfo imageInfo - - for _, platformName := range selectedPlatforms { - platform := platforms[platformName] - plog.Printf("Running %v pre-release...", platform.displayName) - if err := platform.handler(ctx, &client, nil, &spec, &imageInfo); err != nil { - return err - } - } - - return nil -} - -// getImageFile downloads a bzipped CoreOS image, verifies its signature, -// decompresses it, and returns the decompressed path. 
-func getImageFile(client *http.Client, spec *channelSpec, src *storage.Bucket, fileName string) (string, error) { - switch selectedDistro { - case "fedora": - return getFedoraImageFile(client, spec, src, fileName) - default: - return "", fmt.Errorf("Invalid system: %v", selectedDistro) - } -} - -func getImageTypeURI() string { - if specImageType == "Cloud-Base" { - return "Cloud" - } - return specImageType -} - -func getFedoraImageFile(client *http.Client, spec *channelSpec, src *storage.Bucket, fileName string) (string, error) { - imagePath := strings.TrimSuffix(fileName, ".xz") - - if _, err := os.Stat(imagePath); err == nil { - plog.Printf("Reusing existing image %q", imagePath) - return imagePath, nil - } - - rawxzURI, err := url.Parse(fmt.Sprintf("%v/%v/compose/%v/%v/images/%v", spec.BaseURL, specComposeID, getImageTypeURI(), specArch, fileName)) - if err != nil { - return "", err - } - - plog.Printf("Downloading image %q to %q", rawxzURI, fileName) - - if err := sdk.UpdateFile(fileName, rawxzURI.String(), client); err != nil { - return "", err - } - - // decompress it - plog.Printf("Decompressing %q...", fileName) - if err := util.XzDecompressFile(imagePath, fileName); err != nil { - return "", err - } - return imagePath, nil -} - -func uploadAzureBlob(spec *channelSpec, api *azure.API, storageKey storageservice.GetStorageServiceKeysResponse, vhdfile, container, blobName string) error { - blobExists, err := api.BlobExists(spec.Azure.StorageAccount, storageKey.PrimaryKey, container, blobName) - if err != nil { - return fmt.Errorf("failed to check if file %q in account %q container %q exists: %v", vhdfile, spec.Azure.StorageAccount, container, err) - } - - if blobExists { - return nil - } - - if err := api.UploadBlob(spec.Azure.StorageAccount, storageKey.PrimaryKey, vhdfile, container, blobName, false); err != nil { - if _, ok := err.(azure.BlobExistsError); !ok { - return fmt.Errorf("uploading file %q to account %q container %q failed: %v", vhdfile, 
spec.Azure.StorageAccount, container, err) - } - } - return nil -} - -func createAzureImage(spec *channelSpec, api *azure.API, blobName, imageName string) error { - imageexists, err := api.OSImageExists(imageName) - if err != nil { - return fmt.Errorf("failed to check if image %q exists: %T %v", imageName, err, err) - } - - if imageexists { - plog.Printf("OS Image %q exists, using it", imageName) - return nil - } - - plog.Printf("Creating OS image with name %q", imageName) - - bloburl := api.UrlOfBlob(spec.Azure.StorageAccount, spec.Azure.Container, blobName).String() - - // a la https://github.com/coreos/scripts/blob/998c7e093922298637e7c7e82e25cee7d336144d/oem/azure/set-image-metadata.sh - md := &azure.OSImage{ - Label: spec.Azure.Label, - Name: imageName, - OS: "Linux", - Description: spec.Azure.Description, - MediaLink: bloburl, - ImageFamily: spec.Azure.Label, - PublishedDate: time.Now().UTC().Format("2006-01-02"), - RecommendedVMSize: spec.Azure.RecommendedVMSize, - IconURI: spec.Azure.IconURI, - SmallIconURI: spec.Azure.SmallIconURI, - } - - return api.AddOSImage(md) -} - -func replicateAzureImage(spec *channelSpec, api *azure.API, imageName string) error { - plog.Printf("Fetching Azure Locations...") - locations, err := api.Locations() - if err != nil { - return err - } - - plog.Printf("Replicating image to locations: %s", strings.Join(locations, ", ")) - - channelTitle := strings.Title(specChannel) - - if err := api.ReplicateImage(imageName, spec.Azure.Offer, channelTitle, specVersion, locations...); err != nil { - return fmt.Errorf("image replication failed: %v", err) - } - - return nil -} - -type azureImageInfo struct { - ImageName string `json:"image"` -} - -// azurePreRelease runs everything necessary to prepare a CoreOS release for Azure. -// -// This includes uploading the vhd image to Azure storage, creating an OS image from it, -// and replicating that OS image. 
-func azurePreRelease(ctx context.Context, client *http.Client, src *storage.Bucket, spec *channelSpec, imageInfo *imageInfo) error { - if spec.Azure.StorageAccount == "" { - plog.Notice("Azure image creation disabled.") - return nil - } - - // download azure vhd image and unzip it - vhdfile, err := getImageFile(client, spec, src, spec.Azure.Image) - if err != nil { - return err - } - - // sanity check - validate VHD file - plog.Printf("Validating VHD file %q", vhdfile) - if err := validator.ValidateVhd(vhdfile); err != nil { - return err - } - if err := validator.ValidateVhdSize(vhdfile); err != nil { - return err - } - - blobName := fmt.Sprintf("container-linux-%s-%s.vhd", specVersion, specChannel) - // channel name should be caps for azure image - imageName := fmt.Sprintf("%s-%s-%s", spec.Azure.Offer, strings.Title(specChannel), specVersion) - - for _, environment := range spec.Azure.Environments { - // construct azure api client - api, err := azure.New(&azure.Options{ - AzureProfile: azureProfile, - AzureSubscription: environment.SubscriptionName, - }) - if err != nil { - return fmt.Errorf("failed to create Azure API: %v", err) - } - - plog.Printf("Fetching Azure storage credentials") - - storageKey, err := api.GetStorageServiceKeys(spec.Azure.StorageAccount) - if err != nil { - return err - } - - // upload blob, do not overwrite - plog.Printf("Uploading %q to Azure Storage...", vhdfile) - - containers := append([]string{spec.Azure.Container}, environment.AdditionalContainers...) - for _, container := range containers { - err := uploadAzureBlob(spec, api, storageKey, vhdfile, container, blobName) - if err != nil { - return err - } - } - - // create image - if err := createAzureImage(spec, api, blobName, imageName); err != nil { - // if it is a conflict, it already exists! 
- if !azure.IsConflictError(err) { - return err - } - - plog.Printf("Azure image %q already exists", imageName) - } - - // replicate it - if err := replicateAzureImage(spec, api, imageName); err != nil { - return err - } - } - - imageInfo.Azure = &azureImageInfo{ - ImageName: imageName, - } - return nil -} - -func getSpecAWSImageMetadata(spec *channelSpec) (map[string]string, error) { - imageFileName := spec.AWS.Image - imageMetadata := imageMetadataAbstract{ - Env: specEnv, - Version: specVersion, - Timestamp: specTimestamp, - Respin: specRespin, - ImageType: specImageType, - Arch: specArch, - } - t := template.Must(template.New("filename").Parse(imageFileName)) - buffer := &bytes.Buffer{} - if err := t.Execute(buffer, imageMetadata); err != nil { - return nil, err - } - imageFileName = buffer.String() - - var imageName string - switch selectedDistro { - case "fedora": - imageName = strings.TrimSuffix(imageFileName, ".raw.xz") - } - - imageDescription := fmt.Sprintf("%v %v %v", spec.AWS.BaseDescription, specChannel, specVersion) - - awsImageMetaData := map[string]string{ - "imageFileName": imageFileName, - "imageName": imageName, - "imageDescription": imageDescription, - } - - return awsImageMetaData, nil -} - -func awsUploadToPartition(spec *channelSpec, part *awsPartitionSpec, imagePath string) (map[string]string, error) { - plog.Printf("Connecting to %v...", part.Name) - api, err := aws.New(&aws.Options{ - CredentialsFile: awsCredentialsFile, - Profile: part.Profile, - Region: part.BucketRegion, - }) - if err != nil { - return nil, fmt.Errorf("creating client for %v: %v", part.Name, err) - } - - f, err := os.Open(imagePath) - if err != nil { - return nil, fmt.Errorf("Could not open image file %v: %v", imagePath, err) - } - defer f.Close() - - awsImageMetadata, err := getSpecAWSImageMetadata(spec) - if err != nil { - return nil, fmt.Errorf("Could not generate the image metadata: %v", err) - } - - imageFileName := awsImageMetadata["imageFileName"] - imageName := 
awsImageMetadata["imageName"] - imageDescription := awsImageMetadata["imageDescription"] - - var s3ObjectPath string - switch selectedDistro { - case "fedora": - s3ObjectPath = fmt.Sprintf("%s/%s/%s", specArch, specVersion, strings.TrimSuffix(imageFileName, filepath.Ext(imageFileName))) - } - s3ObjectURL := fmt.Sprintf("s3://%s/%s", part.Bucket, s3ObjectPath) - - snapshot, err := api.FindSnapshot(imageName) - if err != nil { - return nil, fmt.Errorf("unable to check for snapshot: %v", err) - } - - if snapshot == nil { - plog.Printf("Creating S3 object %v...", s3ObjectURL) - err = api.UploadObject(f, part.Bucket, s3ObjectPath, false) - if err != nil { - return nil, fmt.Errorf("Error uploading: %v", err) - } - - plog.Printf("Creating EBS snapshot...") - - var format aws.EC2ImageFormat - switch selectedDistro { - case "fedora": - format = aws.EC2ImageFormatRaw - } - - snapshot, err = api.CreateSnapshot(imageName, s3ObjectURL, format) - if err != nil { - return nil, fmt.Errorf("unable to create snapshot: %v", err) - } - } - - // delete unconditionally to avoid leaks after a restart - plog.Printf("Deleting S3 object %v...", s3ObjectURL) - err = api.DeleteObject(part.Bucket, s3ObjectPath) - if err != nil { - return nil, fmt.Errorf("Error deleting S3 object: %v", err) - } - - plog.Printf("Creating AMIs from %v...", snapshot.SnapshotID) - - imageID, err := api.CreateHVMImage(snapshot.SnapshotID, aws.ContainerLinuxDiskSizeGiB, imageName, imageDescription, "x86_64") - if err != nil { - return nil, fmt.Errorf("unable to create image: %v", err) - } - resources := []string{snapshot.SnapshotID, imageID} - - switch selectedDistro { - case "fedora": - err = api.CreateTags(resources, map[string]string{ - "Channel": specChannel, - "Version": specVersion, - "ComposeID": specComposeID, - "Date": specTimestamp, - "Arch": specArch, - }) - if err != nil { - return nil, fmt.Errorf("couldn't tag images: %v", err) - } - } - - if len(part.LaunchPermissions) > 0 { - if err := 
api.GrantLaunchPermission(imageID, part.LaunchPermissions); err != nil { - return nil, err - } - } - - destRegions := make([]string, 0, len(part.Regions)) - foundBucketRegion := false - for _, region := range part.Regions { - if region != part.BucketRegion { - destRegions = append(destRegions, region) - } else { - foundBucketRegion = true - } - } - if !foundBucketRegion { - // We don't handle this case and shouldn't ever - // encounter it - return nil, fmt.Errorf("BucketRegion %v is not listed in Regions", part.BucketRegion) - } - - amis := map[string]string{} - if len(destRegions) > 0 { - plog.Printf("Replicating AMI %v...", imageID) - err := api.CopyImage(imageID, destRegions, func(region string, ami aws.ImageData) { - amis[region] = ami.AMI - }) - if err != nil { - return nil, fmt.Errorf("couldn't copy image: %v", err) - } - } - amis[part.BucketRegion] = imageID - - return amis, nil -} - -type amiListEntry struct { - Region string `json:"name"` - Ami string `json:"hvm"` -} - -type amiList struct { - Entries []amiListEntry `json:"amis"` -} - -// awsPreRelease runs everything necessary to prepare a CoreOS release for AWS. -// -// This includes uploading the ami_vmdk image to an S3 bucket in each EC2 -// partition, creating AMIs, and replicating the AMIs to each region. 
-func awsPreRelease(ctx context.Context, client *http.Client, src *storage.Bucket, spec *channelSpec, imageInfo *imageInfo) error { - if spec.AWS.Image == "" { - plog.Notice("AWS image creation disabled.") - return nil - } - - awsImageMetadata, err := getSpecAWSImageMetadata(spec) - if err != nil { - return fmt.Errorf("Could not generate the image filname: %v", err) - } - - imageFileName := awsImageMetadata["imageFileName"] - - imagePath, err := getImageFile(client, spec, src, imageFileName) - if err != nil { - return err - } - - var amis amiList - for i := range spec.AWS.Partitions { - amiMap, err := awsUploadToPartition(spec, &spec.AWS.Partitions[i], imagePath) - if err != nil { - return err - } - - for region := range amiMap { - amis.Entries = append(amis.Entries, amiListEntry{ - Region: region, - Ami: amiMap[region], - }) - } - } - - imageInfo.AWS = &amis - return nil -} diff --git a/mantle/cmd/plume/release.go b/mantle/cmd/plume/release.go index b9a8b76d..79c187d1 100644 --- a/mantle/cmd/plume/release.go +++ b/mantle/cmd/plume/release.go @@ -19,22 +19,28 @@ import ( "encoding/json" "fmt" "io/ioutil" - "net/http" "net/url" "path/filepath" "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/coreos/mantle/platform/api/aws" - "github.com/coreos/mantle/storage" "github.com/coreos/stream-metadata-go/release" "github.com/spf13/cobra" - "golang.org/x/net/context" ) var ( - releaseDryRun bool - cmdRelease = &cobra.Command{ + awsCredentialsFile string + selectedDistro string + + specBucket string + specPolicy string + specProfile string + specRegion string + specStream string + specVersion string + + cmdRelease = &cobra.Command{ Use: "release [options]", Short: "Publish a new CoreOS release.", Run: runRelease, @@ -45,116 +51,50 @@ var ( func init() { cmdRelease.Flags().StringVar(&awsCredentialsFile, "aws-credentials", "", "AWS credentials file") cmdRelease.Flags().StringVar(&selectedDistro, "distro", "fcos", "system to release") - 
cmdRelease.Flags().StringVar(&azureProfile, "azure-profile", "", "Azure Profile json file") - cmdRelease.Flags().BoolVarP(&releaseDryRun, "dry-run", "n", false, - "perform a trial run, do not make changes") - AddSpecFlags(cmdRelease.Flags()) - AddFedoraSpecFlags(cmdRelease.Flags()) - AddFcosSpecFlags(cmdRelease.Flags()) + cmdRelease.Flags().StringVar(&specBucket, "bucket", "fcos-builds", "S3 bucket") + cmdRelease.Flags().StringVar(&specPolicy, "policy", "public-read", "Canned ACL policy") + cmdRelease.Flags().StringVar(&specProfile, "profile", "default", "AWS profile") + cmdRelease.Flags().StringVar(&specRegion, "region", "us-east-1", "S3 bucket region") + cmdRelease.Flags().StringVarP(&specStream, "stream", "S", "testing", "target stream") + cmdRelease.Flags().StringVarP(&specVersion, "version", "V", "", "release version") root.AddCommand(cmdRelease) } func runRelease(cmd *cobra.Command, args []string) { switch selectedDistro { case "fcos": - if err := runFcosRelease(cmd, args); err != nil { - plog.Fatal(err) - } - case "fedora": - if err := runFedoraRelease(cmd, args); err != nil { - plog.Fatal(err) - } + runFcosRelease(cmd, args) default: plog.Fatalf("Unknown distro %q:", selectedDistro) } } -func runFcosRelease(cmd *cobra.Command, args []string) error { +func runFcosRelease(cmd *cobra.Command, args []string) { if len(args) > 0 { plog.Fatal("No args accepted") } - - spec := FcosChannelSpec() - FcosValidateArguments() - - doS3(&spec) - - modifyReleaseMetadataIndex(&spec, specCommitId) - - return nil -} - -func runFedoraRelease(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - plog.Fatal("No args accepted") + if specVersion == "" { + plog.Fatal("--version is required") } - - spec, err := ChannelFedoraSpec() - if err != nil { - return err + if specStream == "" { + plog.Fatal("--stream is required") } - ctx := context.Background() - client := &http.Client{} - - // Make AWS images public. 
- doAWS(ctx, client, nil, &spec) - - return nil -} - -func doAWS(ctx context.Context, client *http.Client, src *storage.Bucket, spec *channelSpec) { - if spec.AWS.Image == "" { - plog.Notice("AWS image creation disabled.") - return + if specBucket == "" { + plog.Fatal("--bucket is required") } - - awsImageMetadata, err := getSpecAWSImageMetadata(spec) - if err != nil { - return + if specRegion == "" { + plog.Fatal("--region is required") } - imageName := awsImageMetadata["imageName"] - - for _, part := range spec.AWS.Partitions { - for _, region := range part.Regions { - if releaseDryRun { - plog.Printf("Checking for images in %v %v...", part.Name, region) - } else { - plog.Printf("Publishing images in %v %v...", part.Name, region) - } - - api, err := aws.New(&aws.Options{ - CredentialsFile: awsCredentialsFile, - Profile: part.Profile, - Region: region, - }) - if err != nil { - plog.Fatalf("creating client for %v %v: %v", part.Name, region, err) - } - - publish := func(imageName string) { - imageID, err := api.FindImage(imageName) - if err != nil { - plog.Fatalf("couldn't find image %q in %v %v: %v", imageName, part.Name, region, err) - } - - if !releaseDryRun { - err := api.PublishImage(imageID) - if err != nil { - plog.Fatalf("couldn't publish image in %v %v: %v", part.Name, region, err) - } - } - } - publish(imageName + "-hvm") - } - } + doS3() + modifyReleaseMetadataIndex() } -func doS3(spec *fcosChannelSpec) { +func doS3() { api, err := aws.New(&aws.Options{ CredentialsFile: awsCredentialsFile, - Profile: spec.Profile, - Region: spec.Region, + Profile: specProfile, + Region: specRegion, }) if err != nil { plog.Fatalf("creating aws client: %v", err) @@ -162,17 +102,17 @@ func doS3(spec *fcosChannelSpec) { // Assumes the bucket layout defined inside of // https://github.com/coreos/fedora-coreos-tracker/issues/189 - err = api.UpdateBucketObjectsACL(spec.Bucket, filepath.Join("prod", "streams", specChannel, "builds", specVersion), specPolicy) + err = 
api.UpdateBucketObjectsACL(specBucket, filepath.Join("prod", "streams", specStream, "builds", specVersion), specPolicy) if err != nil { plog.Fatalf("updating object ACLs: %v", err) } } -func modifyReleaseMetadataIndex(spec *fcosChannelSpec, commitId string) { +func modifyReleaseMetadataIndex() { api, err := aws.New(&aws.Options{ CredentialsFile: awsCredentialsFile, - Profile: spec.Profile, - Region: spec.Region, + Profile: specProfile, + Region: specRegion, }) if err != nil { plog.Fatalf("creating aws client: %v", err) @@ -184,9 +124,9 @@ func modifyReleaseMetadataIndex(spec *fcosChannelSpec, commitId string) { // version. Plus we need S3 creds anyway later on to push the modified // release index back. - path := filepath.Join("prod", "streams", specChannel, "releases.json") + path := filepath.Join("prod", "streams", specStream, "releases.json") data, err := func() ([]byte, error) { - f, err := api.DownloadFile(spec.Bucket, path) + f, err := api.DownloadFile(specBucket, path) if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == "NoSuchKey" { @@ -212,13 +152,13 @@ func modifyReleaseMetadataIndex(spec *fcosChannelSpec, commitId string) { plog.Fatalf("unmarshaling release metadata json: %v", err) } - releasePath := filepath.Join("prod", "streams", specChannel, "builds", specVersion, "release.json") + releasePath := filepath.Join("prod", "streams", specStream, "builds", specVersion, "release.json") url, err := url.Parse(fmt.Sprintf("https://builds.coreos.fedoraproject.org/%s", releasePath)) if err != nil { plog.Fatalf("creating metadata url: %v", err) } - releaseFile, err := api.DownloadFile(spec.Bucket, releasePath) + releaseFile, err := api.DownloadFile(specBucket, releasePath) if err != nil { plog.Fatalf("downloading release metadata at %s: %v", releasePath, err) } @@ -297,7 +237,7 @@ func modifyReleaseMetadataIndex(spec *fcosChannelSpec, commitId string) { releaseIdx.Metadata.LastModified = time.Now().UTC().Format("2006-01-02T15:04:05Z") 
releaseIdx.Note = "For use only by Fedora CoreOS internal tooling. All other applications should obtain release info from stream metadata endpoints." - releaseIdx.Stream = specChannel + releaseIdx.Stream = specStream out, err := json.Marshal(releaseIdx) if err != nil { @@ -306,7 +246,7 @@ func modifyReleaseMetadataIndex(spec *fcosChannelSpec, commitId string) { // we don't want this to be cached for very long so that e.g. Cincinnati picks it up quickly var releases_max_age = 60 * 5 - err = api.UploadObjectExt(bytes.NewReader(out), spec.Bucket, path, true, specPolicy, aws.ContentTypeJSON, releases_max_age) + err = api.UploadObjectExt(bytes.NewReader(out), specBucket, path, true, specPolicy, aws.ContentTypeJSON, releases_max_age) if err != nil { plog.Fatalf("uploading release metadata json: %v", err) } diff --git a/mantle/cmd/plume/stream_mirror.go b/mantle/cmd/plume/stream_mirror.go index b84b196e..661c3e33 100644 --- a/mantle/cmd/plume/stream_mirror.go +++ b/mantle/cmd/plume/stream_mirror.go @@ -48,9 +48,13 @@ var ( func init() { cmdStreamMirror.Flags().StringVar(&srcFile, "src-file", "", "Source path for stream JSON") - cmdStreamMirror.MarkFlagRequired("src-file") + if err := cmdStreamMirror.MarkFlagRequired("src-file"); err != nil { + panic(err) + } cmdStreamMirror.Flags().StringVar(&dest, "dest", "", "Write images to this directory") - cmdStreamMirror.MarkFlagRequired("dest") + if err := cmdStreamMirror.MarkFlagRequired("dest"); err != nil { + panic(err) + } cmdStreamMirror.Flags().StringVar(&destFile, "dest-file", "", "Destination path for stream JSON (only useful with --url)") cmdStreamMirror.Flags().StringVar(&newBaseURLArg, "url", "", "New base URL for build") cmdStreamMirror.Flags().StringArrayVarP(&artifactTypes, "artifact", "a", nil, "Only fetch this specific artifact type") diff --git a/mantle/cmd/plume/types.go b/mantle/cmd/plume/types.go deleted file mode 100644 index 9af2721c..00000000 --- a/mantle/cmd/plume/types.go +++ /dev/null @@ -1,86 +0,0 @@ -// 
Copyright 2016-2018 Red Hat Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -type storageSpec struct { - BaseURL string - Title string // Replace the bucket name in index page titles - NamedPath string // Copy to $BaseURL/$Arch/$NamedPath - VersionPath bool // Copy to $BaseURL/$Arch/$Version - DirectoryHTML bool - IndexHTML bool -} - -type gceSpec struct { - Project string // GCE project name - Family string // A group name, also used as name prefix - Description string // Human readable-ish description - Licenses []string // Identifiers for tracking usage - Image string // File name of image source - Publish string // Write published image name to given file - Limit int // Limit on # of old images to keep -} - -type azureEnvironmentSpec struct { - SubscriptionName string // Name of subscription in Azure profile - AdditionalContainers []string // Extra containers to upload the disk image to -} - -type azureSpec struct { - Offer string // Azure offer name - Image string // File name of image source - StorageAccount string // Storage account to use for image uploads in each environment - Container string // Container to hold the disk image in each environment - Environments []azureEnvironmentSpec // Azure environments to upload to - - // Fields for azure.OSImage - Label string - Description string // Description of an image in this channel - RecommendedVMSize string - IconURI string - SmallIconURI string -} - -type awsPartitionSpec 
struct { - Name string // Printable name for the partition - Profile string // Authentication profile in ~/.aws - Bucket string // S3 bucket for uploading image - BucketRegion string // Region of the bucket - LaunchPermissions []string // Other accounts to give launch permission - Regions []string // Regions to create the AMI in -} - -type awsSpec struct { - BaseName string // Prefix of image name - BaseDescription string // Prefix of image description - Prefix string // Prefix for filenames of AMI lists - Image string // File name of image source - Partitions []awsPartitionSpec // AWS partitions -} - -type channelSpec struct { - BaseURL string // Copy from $BaseURL/$Arch/$Version - Arches []string - Destinations []storageSpec - GCE gceSpec - Azure azureSpec - AWS awsSpec -} - -type fcosChannelSpec struct { - Bucket string - Profile string - Region string -} diff --git a/mantle/fcos/metadata.go b/mantle/fcos/metadata.go index e30256f9..7ce45c20 100644 --- a/mantle/fcos/metadata.go +++ b/mantle/fcos/metadata.go @@ -21,12 +21,11 @@ import ( "net/http" "net/url" + coreosarch "github.com/coreos/stream-metadata-go/arch" "github.com/coreos/stream-metadata-go/fedoracoreos" fcosinternals "github.com/coreos/stream-metadata-go/fedoracoreos/internals" "github.com/coreos/stream-metadata-go/release" "github.com/coreos/stream-metadata-go/stream" - - "github.com/coreos/mantle/system" ) func fetchURL(u url.URL) ([]byte, error) { @@ -92,7 +91,7 @@ func FetchCanonicalStreamArtifacts(stream, architecture string) (*stream.Arch, e // FetchStreamThisArchitecture returns artifacts for the current architecture from // the given stream. func FetchStreamThisArchitecture(stream string) (*stream.Arch, error) { - return FetchCanonicalStreamArtifacts(stream, system.RpmArch()) + return FetchCanonicalStreamArtifacts(stream, coreosarch.CurrentRpmArch()) } // GetCosaBuildURL returns a URL prefix for the coreos-assembler build. 
diff --git a/mantle/go.mod b/mantle/go.mod index dcbc135a..6b9cc12f 100644 --- a/mantle/go.mod +++ b/mantle/go.mod @@ -1,6 +1,6 @@ module github.com/coreos/mantle -go 1.12 +go 1.17 require ( github.com/Azure/azure-sdk-for-go v8.1.0-beta+incompatible @@ -11,21 +11,17 @@ require ( github.com/aliyun/alibaba-cloud-sdk-go v1.61.1442 github.com/aliyun/aliyun-oss-go-sdk v2.0.3+incompatible github.com/aws/aws-sdk-go v1.34.28 - github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect - github.com/coreos/butane v0.14.0 - github.com/coreos/coreos-assembler-schema v0.0.0-00010101000000-000000000000 + github.com/coreos/butane v0.16.0 + github.com/coreos/coreos-assembler v0.14.0 github.com/coreos/go-semver v0.3.0 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e - github.com/coreos/go-systemd/v22 v22.0.0 - github.com/coreos/ignition/v2 v2.13.0 - github.com/coreos/ioprogress v0.0.0-20151023204047-4637e494fd9b + github.com/coreos/go-systemd/v22 v22.4.0 + github.com/coreos/ignition/v2 v2.14.0 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f - github.com/coreos/stream-metadata-go v0.1.7 - github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4 - github.com/digitalocean/go-libvirt v0.0.0-20200810224808-b9c702499bf7 // indirect + github.com/coreos/stream-metadata-go v0.4.0 + github.com/coreos/vcontext v0.0.0-20220810162454-88bd546c634c github.com/digitalocean/go-qemu v0.0.0-20200529005954-1b453d036a9c github.com/digitalocean/godo v1.33.0 - github.com/dimchansky/utfbom v1.1.1 // indirect github.com/gophercloud/gophercloud v0.22.0 github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f github.com/kballard/go-shellquote v0.0.0-20150810074751-d8ec1a69a250 @@ -34,27 +30,61 @@ require ( github.com/pborman/uuid v1.2.0 github.com/pin/tftp v2.1.0+incompatible github.com/pkg/errors v0.9.1 - github.com/satori/go.uuid v1.2.0 // indirect - github.com/spf13/cobra v0.0.6 - github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace - 
github.com/ulikunitz/xz v0.5.10 + github.com/spf13/cobra v1.5.0 github.com/vincent-petithory/dataurl v1.0.0 github.com/vishvananda/netlink v0.0.0-20150710184826-9cff81214893 github.com/vishvananda/netns v0.0.0-20150710222425-604eaf189ee8 github.com/vmware/govmomi v0.15.0 - github.com/zmb3/gogetdoc v0.0.0-20190228002656-b37376c5da6a // indirect - golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 - golang.org/x/net v0.0.0-20220722155237-a158d28d115b + golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 - golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab - golang.org/x/text v0.3.7 - golang.org/x/tools v0.1.12 // indirect - golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f + golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 + golang.org/x/text v0.3.6 google.golang.org/api v0.34.0 gopkg.in/yaml.v2 v2.4.0 ) +require ( + cloud.google.com/go v0.65.0 // indirect + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/clarketm/json v1.17.1 // indirect + github.com/coreos/go-json v0.0.0-20220810161552-7cce03887f34 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/digitalocean/go-libvirt v0.0.0-20200810224808-b9c702499bf7 // indirect + github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/godbus/dbus/v5 v5.0.4 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.4.2 // indirect + github.com/google/go-querystring v1.0.0 // indirect + github.com/google/uuid v1.1.1 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/json-iterator/go v1.1.10 // indirect + 
github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/satori/go.uuid v1.2.0 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect + github.com/stretchr/testify v1.8.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + go.opencensus.io v0.22.5 // indirect + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect + google.golang.org/appengine v1.6.6 // indirect + google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d // indirect + google.golang.org/grpc v1.31.1 // indirect + google.golang.org/protobuf v1.25.0 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + replace ( - github.com/coreos/coreos-assembler-schema => ../schema + github.com/coreos/coreos-assembler => ../ google.golang.org/cloud => cloud.google.com/go v0.0.0-20190220171618-cbb15e60dc6d ) diff --git a/mantle/go.sum b/mantle/go.sum index e11e614d..19de4ce7 100644 --- a/mantle/go.sum +++ b/mantle/go.sum @@ -46,62 +46,53 @@ github.com/IBM/ibm-cos-sdk-go v1.6.1 h1:2XG/fsXno8228gBEwxf0u2AFI/Nii3wpk17lkpF0 github.com/IBM/ibm-cos-sdk-go v1.6.1/go.mod h1:BOqDAOxuJTamCSdAKx2XStknDaeB99nXWaf1PtvW0iY= github.com/Microsoft/azure-vhd-utils v0.0.0-20161127050200-43293b8d7646 h1:fcoVWIsxst9kR4rX9gS4v2TnCqr8qCiTNhZn1kFwkeg= github.com/Microsoft/azure-vhd-utils v0.0.0-20161127050200-43293b8d7646/go.mod h1:ShD4+ef/PGSGjqi4XYHwhYHoHQ61U0Qy0UXABMLCS3c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1442 h1:W45RUXYz49qyUhP66Aw9u+IzU1Xg0uts4b7HprWjs14= github.com/aliyun/alibaba-cloud-sdk-go v1.61.1442/go.mod h1:RcDobYh8k5VP6TNybz9m++gL3ijVI5wueVr0EM10VsU= github.com/aliyun/aliyun-oss-go-sdk v2.0.3+incompatible h1:724q2AmQ3m1mrdD9kYqK5+1+Zr77vS21jdQ9iF9t4b8= github.com/aliyun/aliyun-oss-go-sdk v2.0.3+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.30.28/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28 h1:sscPpn/Ns3i0F4HPEWAVcwdIRaZZCuL7llJ2/60yPIk= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beevik/etree v1.1.1-0.20200718192613-4a2f8b9d084c/go.mod h1:0yGO2rna3S9DkITDWHY1bMtcY4IJ4w+4S+EooZUR0bE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clarketm/json v1.14.1 h1:43bkbTTKKdDx7crs3WHzkrnH6S1EvAF1VZrdFGMmmz4= -github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= +github.com/clarketm/json v1.17.1 h1:U1IxjqJkJ7bRK4L6dyphmoO840P6bdhPdbbLySourqI= +github.com/clarketm/json v1.17.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/butane v0.14.0 h1:1xLt5y6RR8NTmeDf6yMzqP7Jqre8PvJ/1BTXQTxEMhk= -github.com/coreos/butane v0.14.0/go.mod h1:Q5DcBsHDckEZ7IgQSb1MvvkNc50dpoT1lOHdGWwCRjY= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de h1:qZvNu52Tv7Jfbgxdw3ONHf0BK9UpuSxi9FA9Y+qU5VU= +github.com/coreos/butane v0.16.0 h1:qIH6H9O5lF+NfX7Msz6b4+MezZ68zYCNXBjTPbgnlyU= +github.com/coreos/butane v0.16.0/go.mod h1:7TBe8e7UqDtqsR9pMZl/mKeYT1QYTAFcAiGF5ivUaaQ= github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de/go.mod h1:lryFBkhadOfv8Jue2Vr/f/Yviw8h1DQPQojbXqEChY0= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-json v0.0.0-20220810161552-7cce03887f34 h1:14qC8Go5ArRXeK4neVu4GwD/2KZcLsRotqGW7eBRqwk= +github.com/coreos/go-json v0.0.0-20220810161552-7cce03887f34/go.mod h1:jdmhE6D2v5tisGyVw92x7/r3USTNm2VAkdRZ4ZydKQk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/ignition/v2 v2.13.0 h1:1ouW+d0nOuPUbLjxxOCnC+dWQxynr8Mt5exqJoCD7b4= -github.com/coreos/ignition/v2 v2.13.0/go.mod h1:HO1HWYWcvAIbHu6xewoKxPGBTyZ32FLwGIuipw5d63o= -github.com/coreos/ioprogress v0.0.0-20151023204047-4637e494fd9b h1:mpeSDqY0vMUyJmazX0p62MrHtgwlmOf7Y8rqOYL1cmc= -github.com/coreos/ioprogress v0.0.0-20151023204047-4637e494fd9b/go.mod h1:JIWRG8HSwVYUrjdR/JsFg7DEby0nhQcWFPIQvXJyih8= +github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/ignition/v2 v2.14.0 h1:KfkCCnA6AK0kts/1zxzzNH5lDMCQN9sqqGcGs+RJVX4= +github.com/coreos/ignition/v2 v2.14.0/go.mod h1:wxc4qdYEIHLygzWbVVEuoD7lQGTZmMgX0VjAPYBbeEQ= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/stream-metadata-go v0.1.7 h1:WlD8p2mcGZ/OtROJo0o4eJKK179p697wlVzipTv2Wjc= -github.com/coreos/stream-metadata-go v0.1.7/go.mod h1:RTjQyHgO/G37oJ3qnqYK6Z4TPZ5EsaabOtfMjVXmgko= -github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4 h1:pfSsrvbjUFGINaPGy0mm2QKQKTdq7IcbUa+nQwsz2UM= +github.com/coreos/stream-metadata-go v0.4.0 h1:0OSYc0Q43lveZCzS8AvpWK+fgIMwDW+dwQYH56hSfBY= +github.com/coreos/stream-metadata-go v0.4.0/go.mod h1:clbqsP1PUHnpmZHqn7mMOB/ZU+RFa3MxpAmcvE+FJkU= github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4/go.mod 
h1:HckqHnP/HI41vS0bfVjJ20u6jD0biI5+68QwZm5Xb9U= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/coreos/vcontext v0.0.0-20220810162454-88bd546c634c h1:AjP8DGsqQOtNODjbPofQULNwS0CRq6grLckmB+EhpWE= +github.com/coreos/vcontext v0.0.0-20220810162454-88bd546c634c/go.mod h1:lTNa8nCDdioj9pWs3iUvaiyQEMDjOpok/oTgu5qVleE= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/go-libvirt v0.0.0-20200810224808-b9c702499bf7 h1:7469SEjOVNTL7ZRvLOJYp2jkQaLhggxYithWSHvXIGs= github.com/digitalocean/go-libvirt v0.0.0-20200810224808-b9c702499bf7/go.mod h1:UMlaMc4V1DeGbb53Bw12wwvepjpg/D8xhrdL0wfS6Hs= github.com/digitalocean/go-qemu v0.0.0-20200529005954-1b453d036a9c h1:N2oJLGil1ov9DNz8wx0/IiBZ0kOlRQlHHwx2CFGmovA= @@ -110,8 +101,6 @@ github.com/digitalocean/godo v1.33.0 h1:JNZ/0v/Wp//UAIh84YWZ/x5neB3V5lKgcCHzyqEr github.com/digitalocean/godo v1.33.0/go.mod h1:iJnN9rVu6K5LioLxLimlq0uRI+y/eAQjROUmeU/r0hY= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -123,9 +112,6 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -154,13 +140,11 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -223,15 +207,9 @@ github.com/gophercloud/gophercloud v0.22.0 h1:9lFISNLafZcecT0xUveIMt3IafexC6DIV9 github.com/gophercloud/gophercloud v0.22.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f h1:+SO5iEqu9QjNWL9TfAmOE5u0Uizv1T3jpBuMJfMOVJ0= github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f/go.mod h1:wx8HMD8oQD0Ryhz6+6ykq75PJ79iPyEqYHfwZ4l7OsA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-uuid 
v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -243,28 +221,19 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karrick/godirwalk v1.8.0/go.mod 
h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kballard/go-shellquote v0.0.0-20150810074751-d8ec1a69a250 h1:QyPDU73WRl/8CnuK3JltZLLuNhL3E4o3BROt4g8nFf0= github.com/kballard/go-shellquote v0.0.0-20150810074751-d8ec1a69a250/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= -github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -272,19 +241,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20150519154555-21cb3784d9bd h1:fQAexocAuf0oCEyjbn9Gd1GpkBkJjznx3ltKeDNdtrc= github.com/kylelemons/godebug v0.0.0-20150519154555-21cb3784d9bd/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= -github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v7 v7.0.12 h1:/4pxUdwn9w0QEryNkrrWaodIESPRX+NxpO0Q6hVdaAA= -github.com/minio/minio-go/v7 v7.0.12/go.mod h1:S23iSP5/gbMwtxeY5FM71R+TkAYyzEdoNEDDwpt8yWs= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -293,12 +253,10 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= @@ -311,7 +269,6 @@ github.com/packethost/packngo v0.0.0-20180426081943-80f62d78849d h1:Ub/9h46Drtmu github.com/packethost/packngo v0.0.0-20180426081943-80f62d78849d/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pin/tftp v2.1.0+incompatible h1:Yng4J7jv6lOc6IF4XoB5mnd3P7ZrF60XQq+my3FAMus= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= @@ -321,61 +278,38 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs= -github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= 
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ulikunitz/xz v0.5.10 
h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= -github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vishvananda/netlink v0.0.0-20150710184826-9cff81214893 h1:aa5mTO5h2yn1zhd6HH0IJDBRZdwocPbqrf3ci22Ni1U= @@ -384,8 +318,7 @@ github.com/vishvananda/netns v0.0.0-20150710222425-604eaf189ee8 h1:MmJ82dMUwQ+0j github.com/vishvananda/netns v0.0.0-20150710222425-604eaf189ee8/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vmware/govmomi v0.15.0 h1:fVMjwFASkUIGenwURwP0ruAzTjka0l2AV9wtARwkJLI= github.com/vmware/govmomi v0.15.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= -github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= +github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3/go.mod h1:CSBTxrhePCm0cmXNKDGeu+6bOQzpaEklfCqEpn89JWk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= @@ -394,16 +327,10 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing 
v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zmb3/gogetdoc v0.0.0-20190228002656-b37376c5da6a h1:00UFliGZl2UciXe8o/2iuEsRQ9u7z0rzDTVzuj6EYY0= -github.com/zmb3/gogetdoc v0.0.0-20190228002656-b37376c5da6a/go.mod h1:ofmGw6LrMypycsiWcyug6516EXpIxSbZ+uI9ppGypfY= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -412,9 +339,6 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -423,11 +347,9 @@ golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -458,20 +380,15 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -495,10 +412,9 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -517,12 +433,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -555,44 +468,36 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.6 
h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181207195948-8634b1ecd393/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -637,15 +542,11 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -708,7 +609,6 @@ google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d h1:92D1fum1bJLKSdr google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -732,7 +632,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -740,14 +639,10 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8X gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod 
h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -756,8 +651,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/mantle/harness/harness.go b/mantle/harness/harness.go index 965cbaf3..9808f95c 100644 --- a/mantle/harness/harness.go +++ b/mantle/harness/harness.go @@ -61,7 +61,7 @@ type H struct { finished bool // Test function has completed. done bool // Test is finished and all subtests have completed. hasSub bool - subLock sync.RWMutex // guards hasSub + subLock sync.RWMutex // guards hasSub and subtests suite *Suite parent *H @@ -69,11 +69,14 @@ type H struct { name string // Name of test. 
start time.Time // Time test started duration time.Duration + released bool // Indicates whether the test has already released its parallel slot barrier chan bool // To signal parallel subtests they may start. signal chan bool // To signal a test is done. sub []*H // Queue of subtests to be run in parallel. + subtests []string // All subtests of this test - isParallel bool + isParallel bool + nonExclusiveTestsStarted bool timeout time.Duration // Duration for which the test will be allowed to run execTimer *time.Timer // Used to interrupt the test after timeout @@ -105,6 +108,19 @@ func (t *H) runTimeoutCheck(ctx context.Context, timeout time.Duration, f func() } } +// This functionn is robust to being called multiple times. +// Previously t.suite.release() was only called by tRunner, so +// tRunner ensured that a test is released only once. Since we are +// now exposing the release mechanism outside the package level, it is +// important to introduce idempotence to avoid any corrupted test queue +// states that may result from one test being released multiple times. +func (t *H) Release() { + if !t.released { + t.released = true + t.suite.release() + } +} + func (t *H) StartExecTimer() { ctx, cancel := context.WithCancel(context.Background()) t.timeoutContext = ctx @@ -222,6 +238,17 @@ func (c *H) Name() string { return c.name } +// Subtests returns the list of subtests +func (c *H) Subtests() []string { + return c.subtests +} + +func (c *H) SetSubtests(subtests []string) { + c.subLock.Lock() + c.subtests = subtests + c.subLock.Unlock() +} + // Context returns the context for the current test. // The context is cancelled when the test finishes. 
// A goroutine started during a test can wait for the @@ -240,6 +267,14 @@ func (c *H) setRan() { c.ran = true } +func (c *H) GetNonExclusiveTestStarted() bool { + return c.nonExclusiveTestsStarted +} + +func (c *H) NonExclusiveTestStarted() { + c.nonExclusiveTestsStarted = true +} + // Fail marks the function as having failed but continues execution. func (c *H) Fail() { if c.parent != nil { @@ -479,7 +514,7 @@ func tRunner(t *H, fn func(t *H)) { if len(t.sub) > 0 { // Run parallel subtests. // Decrease the running count for this test. - t.suite.release() + t.Release() // Release the parallel subtests. close(t.barrier) // Wait for subtests to complete. @@ -493,7 +528,7 @@ func tRunner(t *H, fn func(t *H)) { } else if t.isParallel { // Only release the count for this test if it was run as a parallel // test. See comment in Run method. - t.suite.release() + t.Release() } t.report() // Report after all subtests have finished. @@ -517,6 +552,7 @@ func tRunner(t *H, fn func(t *H)) { func (t *H) RunTimeout(name string, f func(t *H), timeout time.Duration) bool { t.subLock.Lock() t.hasSub = true + t.subtests = append(t.subtests, name) t.subLock.Unlock() testName, ok := t.suite.match.fullName(t, name) if !ok { @@ -589,7 +625,10 @@ func (t *H) report() { // could also write verbosely to the 'reporter sink'. I'm fine with // this being a TODO if you don't want to tackle it in this initial // PR. - t.reporters.ReportTest(t.name, status, t.duration, t.output.Bytes()) + t.subLock.Lock() + subtests := t.subtests + t.subLock.Unlock() + t.reporters.ReportTest(t.name, subtests, status, t.duration, t.output.Bytes()) } // CleanOutputDir creates/empties an output directory and returns the cleaned path. 
diff --git a/mantle/harness/harness_test.go b/mantle/harness/harness_test.go index 5a41e644..7b08ca01 100644 --- a/mantle/harness/harness_test.go +++ b/mantle/harness/harness_test.go @@ -18,7 +18,6 @@ package harness import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "reflect" @@ -330,13 +329,7 @@ func makeRegexp(s string) string { } func TestOutputDir(t *testing.T) { - var suitedir string - if dir, err := ioutil.TempDir("", ""); err != nil { - t.Fatal(err) - } else { - defer os.RemoveAll(dir) - suitedir = filepath.Join(dir, "_test_temp") - } + suitedir := filepath.Join(t.TempDir(), "_test_temp") var testdirs []string adddir := func(h *H) { @@ -370,13 +363,7 @@ func TestOutputDir(t *testing.T) { } func TestSubDirs(t *testing.T) { - var suitedir string - if dir, err := ioutil.TempDir("", ""); err != nil { - t.Fatal(err) - } else { - defer os.RemoveAll(dir) - suitedir = filepath.Join(dir, "_test_temp") - } + suitedir := t.TempDir() var testdirs []string adddir := func(h *H) { @@ -410,13 +397,7 @@ func TestSubDirs(t *testing.T) { } func TestTempDir(t *testing.T) { - var suitedir string - if dir, err := ioutil.TempDir("", ""); err != nil { - t.Fatal(err) - } else { - defer os.RemoveAll(dir) - suitedir = filepath.Join(dir, "_test_temp") - } + suitedir := t.TempDir() var testdirs []string opts := Options{ @@ -460,13 +441,7 @@ func TestTempDir(t *testing.T) { } func TestTempFile(t *testing.T) { - var suitedir string - if dir, err := ioutil.TempDir("", ""); err != nil { - t.Fatal(err) - } else { - defer os.RemoveAll(dir) - suitedir = filepath.Join(dir, "_test_temp") - } + suitedir := t.TempDir() var testfiles []string opts := Options{ diff --git a/mantle/harness/reporters/json.go b/mantle/harness/reporters/json.go index 59c94f72..98cb4860 100644 --- a/mantle/harness/reporters/json.go +++ b/mantle/harness/reporters/json.go @@ -39,6 +39,7 @@ type jsonReporter struct { type jsonTest struct { Name string `json:"name"` + Subtests []string `json:"subtests"` Result 
testresult.TestResult `json:"result"` Duration time.Duration `json:"duration"` Output string `json:"output"` @@ -70,12 +71,13 @@ func NewJSONReporter(filename, platform, version string) *jsonReporter { } } -func (r *jsonReporter) ReportTest(name string, result testresult.TestResult, duration time.Duration, b []byte) { +func (r *jsonReporter) ReportTest(name string, subtests []string, result testresult.TestResult, duration time.Duration, b []byte) { r.mutex.Lock() defer r.mutex.Unlock() r.Tests = append(r.Tests, jsonTest{ Name: name, + Subtests: subtests, Result: result, Duration: duration, Output: string(b), diff --git a/mantle/harness/reporters/reporter.go b/mantle/harness/reporters/reporter.go index c0048c05..941b9ead 100644 --- a/mantle/harness/reporters/reporter.go +++ b/mantle/harness/reporters/reporter.go @@ -22,9 +22,9 @@ import ( type Reporters []Reporter -func (reps Reporters) ReportTest(name string, result testresult.TestResult, duration time.Duration, b []byte) { +func (reps Reporters) ReportTest(name string, subtests []string, result testresult.TestResult, duration time.Duration, b []byte) { for _, r := range reps { - r.ReportTest(name, result, duration, b) + r.ReportTest(name, subtests, result, duration, b) } } @@ -45,7 +45,7 @@ func (reps Reporters) SetResult(s testresult.TestResult) { } type Reporter interface { - ReportTest(string, testresult.TestResult, time.Duration, []byte) + ReportTest(string, []string, testresult.TestResult, time.Duration, []byte) Output(string) error SetResult(testresult.TestResult) } diff --git a/mantle/kola/cluster/cluster.go b/mantle/kola/cluster/cluster.go index f5a71315..458106b3 100644 --- a/mantle/kola/cluster/cluster.go +++ b/mantle/kola/cluster/cluster.go @@ -152,31 +152,16 @@ func (t *TestCluster) SSH(m platform.Machine, cmd string) ([]byte, error) { f := func() { stdout, stderr, err = m.SSH(cmd) } - t.Log("cmd:", cmd) errMsg := fmt.Sprintf("ssh: %s", cmd) // If f does not before the test timeout, the 
RunWithExecTimeoutCheck // will end this goroutine and mark the test as failed t.H.RunWithExecTimeoutCheck(f, errMsg) - /*if len(stdout) > 0 { - for _, line := range strings.Split(string(stdout), "\n") { - t.Log(line) - } - }*/ if len(stderr) > 0 { for _, line := range strings.Split(string(stderr), "\n") { t.Log(line) } } - if err != nil{ - if strings.Contains(string(stdout), "aht-dummy"){ - return stdout, nil - } - if strings.Contains(string(err.Error()), "Process exited with status 1"){ - return stdout, nil - } - t.Fatal(err) - } return stdout, err } diff --git a/mantle/kola/harness.go b/mantle/kola/harness.go index 51ef1c58..46afa889 100644 --- a/mantle/kola/harness.go +++ b/mantle/kola/harness.go @@ -55,9 +55,9 @@ import ( "github.com/coreos/mantle/platform/machine/packet" "github.com/coreos/mantle/platform/machine/qemuiso" "github.com/coreos/mantle/platform/machine/unprivqemu" - "github.com/coreos/mantle/sdk" "github.com/coreos/mantle/system" "github.com/coreos/mantle/util" + coreosarch "github.com/coreos/stream-metadata-go/arch" ) // InstalledTestsDir is a directory where "installed" external @@ -65,8 +65,13 @@ import ( // tests at /usr/lib/coreos-assembler/tests/kola/ostree/... // and this will be automatically picked up. const InstalledTestsDir = "/usr/lib/coreos-assembler/tests/kola" + +// InstalledTestMetaPrefix is the prefix for JSON-formatted metadata const InstalledTestMetaPrefix = "# kola:" +// InstalledTestMetaPrefixYaml is the prefix for YAML-formatted metadata +const InstalledTestMetaPrefixYaml = "## kola:" + // InstalledTestDefaultTest is a special name; see the README-kola-ext.md // for more information. const InstalledTestDefaultTest = "test.sh" @@ -75,6 +80,13 @@ const InstalledTestDefaultTest = "test.sh" // Specifying this in the tags list is required to denote a need for Internet access const NeedsInternetTag = "needs-internet" +// PlatformIndependentTag is currently equivalent to platform: qemu, but that may change in the future. 
+// For more, see the doc in external-tests.md. +const PlatformIndependentTag = "platform-independent" + +// defaultPlatformIndependentPlatform is the platform where we run tests that claim platform independence +const defaultPlatformIndependentPlatform = "qemu" + // Don't e.g. check console for kernel errors, SELinux AVCs, etc. const SkipBaseChecksTag = "skip-base-checks" @@ -95,11 +107,13 @@ var ( QEMUOptions = unprivqemu.Options{Options: &Options} // glue to set platform options from main QEMUIsoOptions = qemuiso.Options{Options: &Options} // glue to set platform options from main - CosaBuild *sdk.LocalBuild // this is a parsed cosa build + CosaBuild *util.LocalBuild // this is a parsed cosa build TestParallelism int //glue var to set test parallelism from main TAPFile string // if not "", write TAP results here NoNet bool // Disable tests requiring Internet + // ForceRunPlatformIndependent will cause tests that claim platform-independence to run + ForceRunPlatformIndependent bool DenylistedTests []string // tests which are on the denylist Tags []string // tags to be ran @@ -136,20 +150,10 @@ var ( desc: "kernel warning", match: regexp.MustCompile(`WARNING: CPU: \d+ PID: \d+ at (.+)`), }, - { - desc: "kernel circular locking dependency warning", - match: regexp.MustCompile("WARNING: possible circular locking dependency detected"), - }, { desc: "failure of disk under I/O", match: regexp.MustCompile("rejecting I/O to offline device"), }, - { - // Failure to set up Packet networking in initramfs, - // perhaps due to unresponsive metadata server - desc: "coreos-metadata failure to set up initramfs network", - match: regexp.MustCompile("Failed to start CoreOS Static Network Agent"), - }, { // https://github.com/coreos/bugs/issues/2065 desc: "excessive bonding link status messages", @@ -175,11 +179,6 @@ var ( desc: "initrd-cleanup.service terminated", match: regexp.MustCompile(`initrd-cleanup\.service: Main process exited, code=killed, status=15/TERM`), }, - { - // 
kernel 4.14.11 - desc: "bad page table", - match: regexp.MustCompile(`mm/pgtable-generic.c:\d+: bad (p.d|pte)`), - }, { desc: "Go panic", match: regexp.MustCompile("panic: (.*)"), @@ -196,6 +195,15 @@ var ( desc: "systemd ordering cycle", match: regexp.MustCompile("Ordering cycle found"), }, + { + desc: "oom killer", + match: regexp.MustCompile("invoked oom-killer"), + }, + { + // https://github.com/coreos/fedora-coreos-config/pull/1797 + desc: "systemd generator failure", + match: regexp.MustCompile(`systemd\[[0-9]+\]: (.*) failed with exit status`), + }, } ) @@ -206,6 +214,9 @@ const ( // kolaExtBinDataEnv is an environment variable pointing to the above kolaExtBinDataEnv = "KOLA_EXT_DATA" + // kolaExtContainerDataEnv includes the path to the ostree base container image in oci-archive format. + kolaExtContainerDataEnv = "KOLA_EXT_OSTREE_OCIARCHIVE" + // kolaExtBinDataName is the name for test dependency data kolaExtBinDataName = "data" ) @@ -292,6 +303,15 @@ func hasString(s string, slice []string) bool { return false } +func testSkipBaseChecks(test *register.Test) bool { + for _, tag := range test.Tags { + if tag == SkipBaseChecksTag { + return true + } + } + return false +} + func testRequiresInternet(test *register.Test) bool { for _, flag := range test.Flags { if flag == register.RequiresInternetAccess { @@ -314,12 +334,18 @@ type DenyListObj struct { Arches []string `yaml:"arches"` Platforms []string `yaml:"platforms"` SnoozeDate string `yaml:"snooze"` + OsVersion []string `yaml:"osversion"` } type ManifestData struct { - AddCommitMetadata struct { - FcosStream string `yaml:"fedora-coreos.stream"` - } `yaml:"add-commit-metadata"` + Variables struct { + Stream string `yaml:"stream"` + OsVersion string `yaml:"osversion"` + } `yaml:"variables"` +} + +type InitConfigData struct { + ConfigVariant string `json:"coreos-assembler.config-variant"` } func parseDenyListYaml(pltfrm string) error { @@ -342,26 +368,45 @@ func parseDenyListYaml(pltfrm string) error { 
plog.Debug("Parsed kola-denylist.yaml") - // Get stream and arch - pathToManifest := filepath.Join(Options.CosaWorkdir, "src/config/manifest.yaml") - manifestFile, err := ioutil.ReadFile(pathToManifest) + // Look for the right manifest, taking into account the variant + var manifest ManifestData + var pathToManifest string + pathToInitConfig := filepath.Join(Options.CosaWorkdir, "src/config.json") + initConfigFile, err := ioutil.ReadFile(pathToInitConfig) if os.IsNotExist(err) { - return nil + // No variant config found. Let's read the default manifest + pathToManifest = filepath.Join(Options.CosaWorkdir, "src/config/manifest.yaml") } else if err != nil { + // Unexpected error + return err + } else { + // Figure out the variant and read the corresponding manifests + var initConfig InitConfigData + err = json.Unmarshal(initConfigFile, &initConfig) + if err != nil { + return err + } + pathToManifest = filepath.Join(Options.CosaWorkdir, fmt.Sprintf("src/config/manifest-%s.yaml", initConfig.ConfigVariant)) + } + manifestFile, err := ioutil.ReadFile(pathToManifest) + if err != nil { return err } - - var manifest ManifestData err = yaml.Unmarshal(manifestFile, &manifest) if err != nil { return err } - stream := manifest.AddCommitMetadata.FcosStream + // Get the stream and osversion variables from the manifest + stream := manifest.Variables.Stream + osversion := manifest.Variables.OsVersion + + // Get the current arch & current time arch := Options.CosaBuildArch - plog.Debugf("Arch: %v detected.", arch) today := time.Now() + plog.Debugf("Denylist: Skipping tests for stream: '%s', osversion: '%s', arch: '%s'\n", stream, osversion, arch) + // Accumulate patterns filtering by set policies plog.Debug("Processing denial patterns from yaml...") for _, obj := range objs { @@ -377,6 +422,10 @@ func parseDenyListYaml(pltfrm string) error { continue } + if len(osversion) > 0 && len(obj.OsVersion) > 0 && !hasString(osversion, obj.OsVersion) { + continue + } + if obj.SnoozeDate != "" 
{ snoozeDate, err := time.Parse(snoozeFormat, obj.SnoozeDate) if err != nil { @@ -390,7 +439,9 @@ func parseDenyListYaml(pltfrm string) error { fmt.Printf("⚠️ Skipping kola test pattern \"%s\":\n", obj.Pattern) } - fmt.Printf(" 👉 %s\n", obj.Tracker) + if obj.Tracker != "" { + fmt.Printf(" 👉 %s\n", obj.Tracker) + } DenylistedTests = append(DenylistedTests, obj.Pattern) } @@ -407,6 +458,17 @@ func filterTests(tests map[string]*register.Test, patterns []string, pltfrm stri checkPlatforms = append(checkPlatforms, "qemu") } + // sort tags into include/exclude + positiveTags := []string{} + negativeTags := []string{} + for _, tag := range Tags { + if strings.HasPrefix(tag, "!") { + negativeTags = append(negativeTags, tag[1:]) + } else { + positiveTags = append(positiveTags, tag) + } + } + // Higher-level functions default to '*' if the user didn't pass anything. // Notice this. (This totally ignores the corner case where the user // actually typed '*'). @@ -461,13 +523,24 @@ func filterTests(tests map[string]*register.Test, patterns []string, pltfrm stri } tagMatch := false - for _, tag := range Tags { + for _, tag := range positiveTags { tagMatch = hasString(tag, t.Tags) || tag == t.RequiredTag if tagMatch { break } } + negativeTagMatch := false + for _, tag := range negativeTags { + negativeTagMatch = hasString(tag, t.Tags) + if negativeTagMatch { + break + } + } + if negativeTagMatch { + continue + } + if t.RequiredTag != "" && // if the test has a required tag... !tagMatch && // and that tag was not provided by the user... (!userTypedPattern || !nameMatch) { // and the user didn't request it by name... @@ -484,7 +557,7 @@ func filterTests(tests map[string]*register.Test, patterns []string, pltfrm stri // If the user didn't explicitly type a pattern, then normally we // accept all tests, but if they *did* specify tags, then we only // accept tests which match those tags. 
- if len(Tags) > 0 && !tagMatch { + if len(positiveTags) > 0 && !tagMatch { continue } } @@ -508,6 +581,18 @@ func filterTests(tests map[string]*register.Test, patterns []string, pltfrm stri return allowed, excluded } + // For now, we hardcode platform independent tests to run only on one platform. + // But in the future, we should optimize this so that an overall + // test planner/scheduler knows to run the test at most once or twice. + // Platform independent tests could also run on AWS sometimes for example. + if !ForceRunPlatformIndependent { + for _, tag := range t.Tags { + if tag == PlatformIndependentTag { + t.Platforms = []string{defaultPlatformIndependentPlatform} + } + } + } + isExcluded := false allowed := false for _, platform := range checkPlatforms { @@ -516,7 +601,7 @@ func filterTests(tests map[string]*register.Test, patterns []string, pltfrm stri isExcluded = true break } - allowedArchitecture, _ := isAllowed(system.RpmArch(), t.Architectures, t.ExcludeArchitectures) + allowedArchitecture, _ := isAllowed(coreosarch.CurrentRpmArch(), t.Architectures, t.ExcludeArchitectures) allowed = allowed || (allowedPlatform && allowedArchitecture) } if isExcluded || !allowed { @@ -539,7 +624,7 @@ func filterTests(tests map[string]*register.Test, patterns []string, pltfrm stri delete(t.NativeFuncs, k) continue } - _, excluded = isAllowed(system.RpmArch(), nil, NativeFuncWrap.Exclusions) + _, excluded = isAllowed(coreosarch.CurrentRpmArch(), nil, NativeFuncWrap.Exclusions) if excluded { delete(t.NativeFuncs, k) } @@ -557,7 +642,7 @@ func filterTests(tests map[string]*register.Test, patterns []string, pltfrm stri // register tests in their init() function. outputDir is where various test // logs and data will be written for analysis after the test run. If it already // exists it will be erased! 
-func runProvidedTests(testsBank map[string]*register.Test, patterns []string, multiply int, rerun bool, pltfrm, outputDir string, propagateTestErrors bool) error { +func runProvidedTests(testsBank map[string]*register.Test, patterns []string, multiply int, rerun bool, allowRerunSuccess bool, pltfrm, outputDir string, propagateTestErrors bool) error { var versionStr string // Avoid incurring cost of starting machine in getClusterSemver when @@ -586,6 +671,9 @@ func runProvidedTests(testsBank map[string]*register.Test, patterns []string, mu var nonExclusiveTests []*register.Test for _, test := range tests { if test.NonExclusive { + if test.ExternalTest == "" { + plog.Fatalf("Tests compiled in kola must be exclusive: %v", test.Name) + } nonExclusiveTests = append(nonExclusiveTests, test) delete(tests, test.Name) } @@ -694,7 +782,10 @@ func runProvidedTests(testsBank map[string]*register.Test, patterns []string, mu if len(testsToRerun) > 0 && rerun { newOutputDir := filepath.Join(outputDir, "rerun") fmt.Printf("\n\n======== Re-running failed tests (flake detection) ========\n\n") - runProvidedTests(testsBank, testsToRerun, multiply, false, pltfrm, newOutputDir, propagateTestErrors) + reRunErr := runProvidedTests(testsBank, testsToRerun, multiply, false, allowRerunSuccess, pltfrm, newOutputDir, propagateTestErrors) + if allowRerunSuccess { + return reRunErr + } } // If the intial run failed and the rerun passed, we still return an error @@ -729,39 +820,50 @@ func GetRerunnableTestName(testName string) (string, bool) { func getRerunnable(tests []*harness.H) []string { var testsToRerun []string for _, h := range tests { - name, isRerunnable := GetRerunnableTestName(h.Name()) - if h.Failed() && isRerunnable { - testsToRerun = append(testsToRerun, name) + // The current nonexclusive test wrapper would have all non-exclusive tests. + // We would add all those tests for rerunning if none of the non-exclusive + // subtests start due to some initial failure. 
+ if nonexclusiveWrapperMatch.MatchString(h.Name()) && !h.GetNonExclusiveTestStarted() { + if h.Failed() { + testsToRerun = append(testsToRerun, h.Subtests()...) + } + } else { + name, isRerunnable := GetRerunnableTestName(h.Name()) + if h.Failed() && isRerunnable { + testsToRerun = append(testsToRerun, name) + } } } return testsToRerun } -func RunTests(patterns []string, multiply int, rerun bool, pltfrm, outputDir string, propagateTestErrors bool) error { - return runProvidedTests(register.Tests, patterns, multiply, rerun, pltfrm, outputDir, propagateTestErrors) +func RunTests(patterns []string, multiply int, rerun bool, allowRerunSuccess bool, pltfrm, outputDir string, propagateTestErrors bool) error { + return runProvidedTests(register.Tests, patterns, multiply, rerun, allowRerunSuccess, pltfrm, outputDir, propagateTestErrors) } func RunUpgradeTests(patterns []string, rerun bool, pltfrm, outputDir string, propagateTestErrors bool) error { - return runProvidedTests(register.UpgradeTests, patterns, 0, rerun, pltfrm, outputDir, propagateTestErrors) + return runProvidedTests(register.UpgradeTests, patterns, 0, rerun, false, pltfrm, outputDir, propagateTestErrors) } // externalTestMeta is parsed from kola.json in external tests type externalTestMeta struct { - Architectures string `json:"architectures,omitempty"` - Platforms string `json:"platforms,omitempty"` - Distros string `json:"distros,omitempty"` - Tags string `json:"tags,omitempty"` - RequiredTag string `json:"requiredTag,omitempty"` - AdditionalDisks []string `json:"additionalDisks,omitempty"` - MinMemory int `json:"minMemory,omitempty"` - MinDiskSize int `json:"minDisk,omitempty"` - AdditionalNics int `json:"additionalNics,omitempty"` - AppendKernelArgs string `json:"appendKernelArgs,omitempty"` - AppendFirstbootKernelArgs string `json:"appendFirstbootKernelArgs,omitempty"` - Exclusive bool `json:"exclusive"` - TimeoutMin int `json:"timeoutMin"` - Conflicts []string `json:"conflicts"` - AllowConfigWarnings 
bool `json:"allowConfigWarnings"` + Architectures string `json:"architectures,omitempty" yaml:"architectures,omitempty"` + Platforms string `json:"platforms,omitempty" yaml:"platforms,omitempty"` + Distros string `json:"distros,omitempty" yaml:"distros,omitempty"` + Tags string `json:"tags,omitempty" yaml:"tags,omitempty"` + RequiredTag string `json:"requiredTag,omitempty" yaml:"requiredTag,omitempty"` + AdditionalDisks []string `json:"additionalDisks,omitempty" yaml:"additionalDisks,omitempty"` + InjectContainer bool `json:"injectContainer,omitempty" yaml:"injectContainer,omitempty"` + MinMemory int `json:"minMemory,omitempty" yaml:"minMemory,omitempty"` + MinDiskSize int `json:"minDisk,omitempty" yaml:"minDisk,omitempty"` + AdditionalNics int `json:"additionalNics,omitempty" yaml:"additionalNics,omitempty"` + AppendKernelArgs string `json:"appendKernelArgs,omitempty" yaml:"appendKernelArgs,omitempty"` + AppendFirstbootKernelArgs string `json:"appendFirstbootKernelArgs,omitempty" yaml:"appendFirstbootKernelArgs,omitempty"` + Exclusive bool `json:"exclusive" yaml:"exclusive"` + TimeoutMin int `json:"timeoutMin" yaml:"timeoutMin"` + Conflicts []string `json:"conflicts" yaml:"conflicts"` + AllowConfigWarnings bool `json:"allowConfigWarnings" yaml:"allowConfigWarnings"` + NoInstanceCreds bool `json:"noInstanceCreds" yaml:"noInstanceCreds"` } // metadataFromTestBinary extracts JSON-in-comment like: @@ -776,6 +878,8 @@ func metadataFromTestBinary(executable string) (*externalTestMeta, error) { defer f.Close() r := bufio.NewReader(io.LimitReader(f, 8192)) meta := &externalTestMeta{Exclusive: true} + inmeta := false // true if we saw a ## kola: prefix after which we expect YAML + metadatayaml := "" // accumulated YAML metadata for { line, err := r.ReadString('\n') if err == io.EOF { @@ -783,17 +887,38 @@ func metadataFromTestBinary(executable string) (*externalTestMeta, error) { } else if err != nil { return nil, err } - if !strings.HasPrefix(line, 
InstalledTestMetaPrefix) { - continue + + // Handle the older JSON metadata + if strings.HasPrefix(line, InstalledTestMetaPrefix) { + if inmeta { + return nil, fmt.Errorf("found both yaml and json test prefixes (%v %v)", InstalledTestMetaPrefixYaml, InstalledTestMetaPrefix) + } + buf := strings.TrimSpace(line[len(InstalledTestMetaPrefix):]) + dec := json.NewDecoder(strings.NewReader(buf)) + dec.DisallowUnknownFields() + meta = &externalTestMeta{Exclusive: true} + if err := dec.Decode(meta); err != nil { + return nil, errors.Wrapf(err, "parsing %s", line) + } + break // We're done processing } - buf := strings.TrimSpace(line[len(InstalledTestMetaPrefix):]) - dec := json.NewDecoder(strings.NewReader(buf)) - dec.DisallowUnknownFields() - meta = &externalTestMeta{Exclusive: true} - if err := dec.Decode(meta); err != nil { - return nil, errors.Wrapf(err, "parsing %s", line) + + // Look for the new YAML metadata + if strings.HasPrefix(line, InstalledTestMetaPrefixYaml) { + if inmeta { + return nil, fmt.Errorf("found multiple %s", InstalledTestMetaPrefixYaml) + } + inmeta = true + } else if inmeta { + if !strings.HasPrefix(line, "## ") { + if err := yaml.UnmarshalStrict([]byte(metadatayaml), &meta); err != nil { + return nil, err + } + break + } else { + metadatayaml += fmt.Sprintf("%s\n", line[3:]) + } } - break } return meta, nil } @@ -912,11 +1037,18 @@ Environment=KOLA_TEST_EXE=%s Environment=%s=%s ExecStart=%s `, unitName, testname, base, kolaExtBinDataEnv, destDataDir, remotepath) + if targetMeta.InjectContainer { + if CosaBuild == nil { + return fmt.Errorf("test %v uses injectContainer, but no cosa build found", testname) + } + ostreeContainer := CosaBuild.Meta.BuildArtifacts.Ostree + unit += fmt.Sprintf("Environment=%s=/home/nest/%s\n", kolaExtContainerDataEnv, ostreeContainer.Path) + } config.AddSystemdUnit(unitName, unit, conf.NoState) // Architectures using 64k pages use slightly more memory, ask for more than requested // to make sure that we don't run out of 
it. Currently ppc64le and aarch64 use 64k pages. - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "ppc64le", "aarch64": if targetMeta.MinMemory <= 4096 { targetMeta.MinMemory = targetMeta.MinMemory * 2 @@ -931,6 +1063,7 @@ ExecStart=%s Tags: []string{"external"}, AdditionalDisks: targetMeta.AdditionalDisks, + InjectContainer: targetMeta.InjectContainer, MinMemory: targetMeta.MinMemory, MinDiskSize: targetMeta.MinDiskSize, AdditionalNics: targetMeta.AdditionalNics, @@ -951,7 +1084,7 @@ ExecStart=%s } else { fmt.Printf("Fetching status failed: %v\n", suberr) } - if Options.SSHOnTestFailure { + if mach.RuntimeConf().SSHOnTestFailure { plog.Errorf("dropping to shell: kolet failed: %v: %s", err, stderr) if err := platform.Manhole(mach); err != nil { plog.Errorf("failed to get terminal via ssh: %v", err) @@ -985,6 +1118,9 @@ ExecStart=%s } else { t.Distros = strings.Fields(targetMeta.Distros) } + if targetMeta.NoInstanceCreds { + t.Flags = append(t.Flags, register.NoInstanceCreds) + } t.Tags = append(t.Tags, strings.Fields(targetMeta.Tags)...) // TODO validate tags here t.RequiredTag = targetMeta.RequiredTag @@ -1070,15 +1206,15 @@ func registerTestDir(dir, testprefix string, children []os.FileInfo) error { return err } } else if isreg && (c.Mode().Perm()&0001) == 0 { - _, err := os.Open(filepath.Join(dir, c.Name())) + file, err := os.Open(filepath.Join(dir, c.Name())) if err != nil { return errors.Wrapf(err, "opening %s", c.Name()) } - /*scanner := bufio.NewScanner(file) + scanner := bufio.NewScanner(file) scanner.Scan() if strings.HasPrefix(scanner.Text(), "#!") { plog.Warningf("Found non-executable file with shebang: %s\n", c.Name()) - }*/ + } } } @@ -1188,7 +1324,7 @@ func createTestBuckets(tests []*register.Test) [][]*register.Test { if _, found := testMap[conflict]; found { testMap[conflict].Conflicts = append(testMap[conflict].Conflicts, test.Name) } else { - plog.Fatalf("%v specified %v as a conflict but %v was not found. 
Double-check that it is marked as non-exclusive.", + plog.Debugf("%v specified %v as a conflict but %v was not found. If you are running both tests, verify that both are marked as non-exclusive.", test.Name, conflict, conflict) } } @@ -1239,13 +1375,21 @@ func makeNonExclusiveTest(bucket int, tests []*register.Test, flight platform.Fl var flags []register.Flag var nonExclusiveTestConfs []*conf.Conf dependencyDirs := make(register.DepDirMap) + var subtests []string for _, test := range tests { + subtests = append(subtests, test.Name) if test.HasFlag(register.NoSSHKeyInMetadata) || test.HasFlag(register.NoSSHKeyInUserData) { plog.Fatalf("Non-exclusive test %v cannot have NoSSHKeyIn* flag", test.Name) } + if test.HasFlag(register.NoInstanceCreds) { + plog.Fatalf("Non-exclusive test %v cannot have NoInstanceCreds flag", test.Name) + } if test.HasFlag(register.AllowConfigWarnings) { plog.Fatalf("Non-exclusive test %v cannot have AllowConfigWarnings flag", test.Name) } + if test.AppendKernelArgs != "" { + plog.Fatalf("Non-exclusive test %v cannot have AppendKernelArgs", test.Name) + } if !internetAccess && testRequiresInternet(test) { flags = append(flags, register.RequiresInternetAccess) internetAccess = true @@ -1276,6 +1420,7 @@ func makeNonExclusiveTest(bucket int, tests []*register.Test, flight platform.Fl for _, t := range tests { t := t run := func(h *harness.H) { + tcluster.H.NonExclusiveTestStarted() testResults.add(h) // tcluster has a reference to the wrapper's harness // We need a new TestCluster that has a reference to the @@ -1305,6 +1450,7 @@ func makeNonExclusiveTest(bucket int, tests []*register.Test, flight platform.Fl } }, UserData: mergedConfig, + Subtests: subtests, // This will allow runTest to copy kolet to machine NativeFuncs: make(map[string]register.NativeFuncWrap), ClusterSize: 1, @@ -1321,13 +1467,18 @@ func makeNonExclusiveTest(bucket int, tests []*register.Test, flight platform.Fl // analysis after the test run. It should already exist. 
func runTest(h *harness.H, t *register.Test, pltfrm string, flight platform.Flight) { h.Parallel() + h.SetSubtests(t.Subtests) rconf := &platform.RuntimeConfig{ - OutputDir: h.OutputDir(), - NoSSHKeyInUserData: t.HasFlag(register.NoSSHKeyInUserData), + AllowFailedUnits: testSkipBaseChecks(t), + InternetAccess: testRequiresInternet(t), + NoInstanceCreds: t.HasFlag(register.NoInstanceCreds), NoSSHKeyInMetadata: t.HasFlag(register.NoSSHKeyInMetadata), + NoSSHKeyInUserData: t.HasFlag(register.NoSSHKeyInUserData), + OutputDir: h.OutputDir(), + SSHOnTestFailure: Options.SSHOnTestFailure, WarningsAction: conf.FailWarnings, - InternetAccess: testRequiresInternet(t), + EarlyRelease: h.Release, } if t.HasFlag(register.AllowConfigWarnings) { rconf.WarningsAction = conf.IgnoreWarnings @@ -1341,11 +1492,9 @@ func runTest(h *harness.H, t *register.Test, pltfrm string, flight platform.Flig defer func() { h.StopExecTimer() c.Destroy() - for _, k := range t.Tags { - if k == SkipBaseChecksTag { - plog.Debugf("Skipping base checks for %s", t.Name) - return - } + if testSkipBaseChecks(t) { + plog.Debugf("Skipping base checks for %s", t.Name) + return } for id, output := range c.ConsoleOutput() { for _, badness := range CheckConsole([]byte(output), t) { @@ -1433,6 +1582,17 @@ func runTest(h *harness.H, t *register.Test, pltfrm string, flight platform.Flig } } + if t.InjectContainer { + if CosaBuild == nil { + h.Fatalf("Test %s uses injectContainer, but no cosa build found", t.Name) + } + ostreeContainer := CosaBuild.Meta.BuildArtifacts.Ostree + ostreeContainerPath := filepath.Join(CosaBuild.Dir, ostreeContainer.Path) + if err := cluster.DropFile(tcluster.Machines(), ostreeContainerPath); err != nil { + h.Fatal(err) + } + } + if t.ExternalTest != "" { setupExternalTest(h, t, tcluster) // Collect the journal logs after execution is finished diff --git a/mantle/kola/register/register.go b/mantle/kola/register/register.go index 7aec44d9..cd21f0a0 100644 --- 
a/mantle/kola/register/register.go +++ b/mantle/kola/register/register.go @@ -27,6 +27,7 @@ type Flag int const ( NoSSHKeyInUserData Flag = iota // don't inject SSH key into Ignition/cloud-config NoSSHKeyInMetadata // don't add SSH key to platform metadata + NoInstanceCreds // don't grant credentials (AWS instance profile, GCP service account) to the instance NoEmergencyShellCheck // don't check console output for emergency shell invocation RequiresInternetAccess // run the test only if the platform supports Internet access AllowConfigWarnings // ignore Ignition and Butane warnings instead of failing @@ -51,6 +52,7 @@ func CreateNativeFuncWrap(f func() error, exclusions ...string) NativeFuncWrap { // function is run. type Test struct { Name string // should be unique + Subtests []string Run func(cluster.TestCluster) NativeFuncs map[string]NativeFuncWrap UserData *conf.UserData @@ -76,6 +78,9 @@ type Test struct { // "5G:mpath,foo,bar"]) -- defaults to none. AdditionalDisks []string + // InjectContainer will cause the ostree base image to be injected into the target + InjectContainer bool + // Minimum amount of memory in MB required for test. 
MinMemory int diff --git a/mantle/kola/tests/coretest/core.go b/mantle/kola/tests/coretest/core.go index a536ef64..c5e76694 100644 --- a/mantle/kola/tests/coretest/core.go +++ b/mantle/kola/tests/coretest/core.go @@ -4,8 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" - "os" "os/exec" "strconv" "strings" @@ -49,11 +47,11 @@ func init() { NativeFuncs: map[string]register.NativeFuncWrap{ "PortSSH": register.CreateNativeFuncWrap(TestPortSsh), "DbusPerms": register.CreateNativeFuncWrap(TestDbusPerms), - "NetworkScripts": register.CreateNativeFuncWrap(TestNetworkScripts, []string{"s390x"}...), "ServicesActive": register.CreateNativeFuncWrap(TestServicesActive), "ReadOnly": register.CreateNativeFuncWrap(TestReadOnlyFs), "Useradd": register.CreateNativeFuncWrap(TestUseradd), "MachineID": register.CreateNativeFuncWrap(TestMachineID), + "RHCOSGrowpart": register.CreateNativeFuncWrap(TestRHCOSGrowfs, []string{"fcos", "nestos"}...), "FCOSGrowpart": register.CreateNativeFuncWrap(TestFCOSGrowfs, []string{"rhcos"}...), }, }) @@ -61,7 +59,7 @@ func init() { // TODO: Only enable PodmanPing on non qemu-unpriv. Needs: // https://github.com/coreos/mantle/issues/1132 register.RegisterTest(®ister.Test{ - Name: "nestos.internet", + Name: "fcos.internet", Run: InternetTests, ClusterSize: 1, Flags: []register.Flag{register.RequiresInternetAccess}, @@ -79,7 +77,7 @@ func init() { "RandomUUID": register.CreateNativeFuncWrap(TestFsRandomUUID), }, // FIXME run on RHCOS once it has https://github.com/coreos/ignition-dracut/pull/93 - Distros: []string{"fcos", "nestos"}, + Distros: []string{"fcos"}, }) register.RegisterTest(®ister.Test{ Name: "rhcos.services-disabled", @@ -129,7 +127,7 @@ func TestPodmanWgetHead() error { // This execs gdbus, because we need to change uses to test perms. 
func TestDbusPerms() error { c := exec.Command( - "sudo", "-u", "core", + "sudo", "-u", "nest", "gdbus", "call", "--system", "--dest", "org.freedesktop.systemd1", "--object-path", "/org/freedesktop/systemd1", @@ -148,7 +146,7 @@ func TestDbusPerms() error { } c = exec.Command( - "sudo", "-u", "core", + "sudo", "-u", "nest", "gdbus", "call", "--system", "--dest", "org.freedesktop.systemd1", "--object-path", "/org/freedesktop/systemd1/unit/ntpd_2eservice", @@ -247,54 +245,8 @@ func TestReadOnlyFs() error { return fmt.Errorf("could not find /usr or / mount points.") } -func TestNetworkScripts() error { - networkScriptsDir := "/etc/sysconfig/network-scripts" - entries, err := ioutil.ReadDir(networkScriptsDir) - if err != nil { - if !os.IsNotExist(err) { - return err - } - return nil - } - if len(entries) > 0 { - return fmt.Errorf("Found content in %s", networkScriptsDir) - } - return nil -} - // Test that the root disk's GUID was set to a random one on first boot. func TestFsRandomUUID() error { - errQcow2 := TestFsRandomUUID_qcow2() - errIso := TestFsRandomUUID_iso() - if errQcow2 == nil || errIso == nil { - return nil - } - return errIso -} - -func TestFsRandomUUID_iso() error { - c := exec.Command("sh", "-ec", "sudo blkid -o value -s UUID /dev/loop0") - - out, err := c.Output() - - if err != nil { - return fmt.Errorf("findmnt: %v", err) - } - - got, err := uuid.ParseBytes(bytes.TrimSpace(out)) - if err != nil { - return fmt.Errorf("malformed GUID: %v", err) - } - - defaultGUID := uuid.Parse("00000000-0000-4000-a000-000000000001") - if uuid.Equal(defaultGUID, got) { - return fmt.Errorf("unexpected default GUID found") - } - - return nil -} - -func TestFsRandomUUID_qcow2() error { c := exec.Command("sh", "-ec", "sudo blkid -o value -s PTUUID /dev/$(lsblk -no PKNAME $(findmnt -vno SOURCE /))") out, err := c.Output() if err != nil { @@ -369,12 +321,7 @@ func TestRHCOSGrowfs() error { // and check that filesystem size has been grown to at least 7 GB. 
func TestFCOSGrowfs() error { // check that filesystem size is >= 7 GB - err_qemu := testGrowfs(7 * 1024 * 1024 * 1024) - err_iso := testGrowfs(1024 * 1024) - if err_qemu == nil || err_iso == nil { - return nil - } - return err_iso + return testGrowfs(7 * 1024 * 1024 * 1024) } func checkService(unit string) error { diff --git a/mantle/kola/tests/crio/crio.go b/mantle/kola/tests/crio/crio.go index 0810083c..3fbaa170 100644 --- a/mantle/kola/tests/crio/crio.go +++ b/mantle/kola/tests/crio/crio.go @@ -40,6 +40,11 @@ type simplifiedCrioInfo struct { CgroupDriver string `json:"cgroup_driver"` } +// overrideCrioOperationTimeoutSeconds replaces the currently *extremely* low +// default crio operation timeouts that cause flakes in our CI. +// See https://github.com/openshift/os/issues/818 +const overrideCrioOperationTimeoutSeconds = "300s" + // crioPodTemplate is a simple string template required for creating a pod in crio // It takes two strings: the name (which will be expanded) and the generated image name var crioPodTemplate = `{ @@ -283,12 +288,13 @@ func crioNetwork(c cluster.TestCluster) { } listener := func(ctx context.Context) error { - podID, err := c.SSHf(dest, "sudo crictl runp %s", crioConfigPod) + podID, err := c.SSHf(dest, "sudo crictl runp -T %s %s", overrideCrioOperationTimeoutSeconds, crioConfigPod) if err != nil { return err } - containerID, err := c.SSHf(dest, "sudo crictl create --no-pull %s %s %s", + containerID, err := c.SSHf(dest, "sudo crictl create -T %s --no-pull %s %s %s", + overrideCrioOperationTimeoutSeconds, podID, crioConfigContainer, crioConfigPod) if err != nil { return err @@ -326,12 +332,13 @@ func crioNetwork(c cluster.TestCluster) { time.Sleep(100 * time.Millisecond) } } - podID, err := c.SSHf(src, "sudo crictl runp %s", crioConfigPod) + podID, err := c.SSHf(src, "sudo crictl runp -T %s %s", overrideCrioOperationTimeoutSeconds, crioConfigPod) if err != nil { return err } - containerID, err := c.SSHf(src, "sudo crictl create --no-pull 
%s %s %s", + containerID, err := c.SSHf(src, "sudo crictl create -T %s --no-pull %s %s %s", + overrideCrioOperationTimeoutSeconds, podID, crioConfigContainer, crioConfigPod) if err != nil { return err @@ -379,9 +386,10 @@ func crioNetworksReliably(c cluster.TestCluster) { c.Fatal(err) } - cmdCreatePod := fmt.Sprintf("sudo crictl runp %s", crioConfigPod) + cmdCreatePod := fmt.Sprintf("sudo crictl runp -T %s %s", overrideCrioOperationTimeoutSeconds, crioConfigPod) podID := c.MustSSH(m, cmdCreatePod) - containerID := c.MustSSH(m, fmt.Sprintf("sudo crictl create --no-pull %s %s %s", + containerID := c.MustSSH(m, fmt.Sprintf("sudo crictl create -T %s --no-pull %s %s %s", + overrideCrioOperationTimeoutSeconds, podID, crioConfigContainer, crioConfigPod)) output = output + string(c.MustSSH(m, fmt.Sprintf("sudo crictl exec %s ping -i 0.2 %s -w 1 >/dev/null && echo PASS || echo FAIL", containerID, hostIP))) } @@ -441,9 +449,10 @@ func crioPodContinuesDuringServiceRestart(c cluster.TestCluster) { if err != nil { c.Fatal(err) } - cmdCreatePod := fmt.Sprintf("sudo crictl runp %s", crioConfigPod) + cmdCreatePod := fmt.Sprintf("sudo crictl runp -T %s %s", overrideCrioOperationTimeoutSeconds, crioConfigPod) podID := c.MustSSH(m, cmdCreatePod) - containerID := c.MustSSH(m, fmt.Sprintf("sudo crictl create --no-pull %s %s %s", + containerID := c.MustSSH(m, fmt.Sprintf("sudo crictl create -T %s --no-pull %s %s %s", + overrideCrioOperationTimeoutSeconds, podID, crioConfigContainer, crioConfigPod)) cmd := fmt.Sprintf("sudo crictl exec %s bash -c \"sleep 25 && echo PASS > /tmp/test/restart-test\"", containerID) diff --git a/mantle/kola/tests/docker/docker.go b/mantle/kola/tests/docker/docker.go index a6f588bd..e1587263 100644 --- a/mantle/kola/tests/docker/docker.go +++ b/mantle/kola/tests/docker/docker.go @@ -400,7 +400,7 @@ func dockerOldClient(c cluster.TestCluster) { genDockerContainer(c, m, "echo", []string{"echo"}) - output := c.MustSSH(m, "/home/core/docker-1.9.1 run echo echo 'IT 
WORKED'") + output := c.MustSSH(m, "/home/nest/docker-1.9.1 run echo echo 'IT WORKED'") if !bytes.Equal(output, []byte("IT WORKED")) { c.Fatalf("unexpected result from docker client: %q", output) diff --git a/mantle/kola/tests/ignition/empty.go b/mantle/kola/tests/ignition/empty.go index 9d500283..649f09fe 100644 --- a/mantle/kola/tests/ignition/empty.go +++ b/mantle/kola/tests/ignition/empty.go @@ -28,8 +28,8 @@ func init() { Name: "fcos.ignition.misc.empty", Run: noIgnitionSSHKey, ClusterSize: 1, - ExcludePlatforms: []string{"qemu", "esx", "aws", "azure","qemu-iso"}, - Distros: []string{"fcos"}, + ExcludePlatforms: []string{"qemu", "esx"}, + Distros: []string{"fcos", "nestos"}, UserData: conf.Empty(), Tags: []string{"ignition"}, }) @@ -37,8 +37,8 @@ func init() { Name: "fcos.ignition.v3.noop", Run: noIgnitionSSHKey, ClusterSize: 1, - ExcludePlatforms: []string{"qemu", "esx", "aws", "azure"}, - Distros: []string{"fcos"}, + ExcludePlatforms: []string{"qemu", "esx"}, + Distros: []string{"fcos", "nestos"}, Flags: []register.Flag{register.NoSSHKeyInUserData}, UserData: conf.Ignition(`{"ignition":{"version":"3.0.0"}}`), Tags: []string{"ignition"}, diff --git a/mantle/kola/tests/ignition/luks.go b/mantle/kola/tests/ignition/luks.go index c4a3731f..8fc407de 100644 --- a/mantle/kola/tests/ignition/luks.go +++ b/mantle/kola/tests/ignition/luks.go @@ -4,6 +4,8 @@ import ( "fmt" "time" + coreosarch "github.com/coreos/stream-metadata-go/arch" + "github.com/coreos/mantle/kola" "github.com/coreos/mantle/kola/cluster" "github.com/coreos/mantle/kola/register" @@ -11,7 +13,6 @@ import ( "github.com/coreos/mantle/platform" "github.com/coreos/mantle/platform/conf" "github.com/coreos/mantle/platform/machine/unprivqemu" - "github.com/coreos/mantle/system" "github.com/coreos/mantle/util" ) @@ -24,7 +25,7 @@ func init() { Name: `luks.tang`, Flags: []register.Flag{}, Distros: []string{"rhcos"}, - Tags: []string{"luks", "tang", kola.NeedsInternetTag}, + Tags: []string{"luks", "tang", 
kola.NeedsInternetTag, "reprovision"}, }) register.RegisterTest(®ister.Test{ Run: luksSSST1Test, @@ -34,7 +35,7 @@ func init() { Distros: []string{"rhcos"}, Platforms: []string{"qemu-unpriv"}, ExcludeArchitectures: []string{"s390x"}, // no TPM backend support for s390x - Tags: []string{"luks", "tpm", "tang", "sss", kola.NeedsInternetTag}, + Tags: []string{"luks", "tpm", "tang", "sss", kola.NeedsInternetTag, "reprovision"}, }) register.RegisterTest(®ister.Test{ Run: luksSSST2Test, @@ -44,7 +45,7 @@ func init() { Distros: []string{"rhcos"}, Platforms: []string{"qemu-unpriv"}, ExcludeArchitectures: []string{"s390x"}, // no TPM backend support for s390x - Tags: []string{"luks", "tpm", "tang", "sss", kola.NeedsInternetTag}, + Tags: []string{"luks", "tpm", "tang", "sss", kola.NeedsInternetTag, "reprovision"}, }) } @@ -90,8 +91,8 @@ func setupTangMachine(c cluster.TestCluster) ut.TangServer { // TODO: move container image to centralized namespace // container source: https://github.com/mike-nguyen/tang-docker-container/ containerImage := "quay.io/mike_nguyen/tang" - if system.RpmArch() != "x86_64" { - containerImage = "quay.io/multi-arch/tang:" + system.RpmArch() + if coreosarch.CurrentRpmArch() != "x86_64" { + containerImage = "quay.io/multi-arch/tang:" + coreosarch.CurrentRpmArch() } containerID, errMsg, err := m.SSH("sudo podman run -d -p 80:80 " + containerImage) @@ -161,7 +162,7 @@ func runTest(c cluster.TestCluster, tpm2 bool, threshold int, killTangAfterFirst MinMemory: 4096, } // ppc64le and aarch64 use 64K pages - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "ppc64le", "aarch64": opts.MinMemory = 8192 } @@ -171,7 +172,7 @@ func runTest(c cluster.TestCluster, tpm2 bool, threshold int, killTangAfterFirst } rootPart := "/dev/disk/by-partlabel/root" // hacky, but needed for s390x because of gpt issue with naming on big endian systems: https://bugzilla.redhat.com/show_bug.cgi?id=1899990 - if system.RpmArch() == "s390x" { + if 
coreosarch.CurrentRpmArch() == "s390x" { rootPart = "/dev/disk/by-id/virtio-primary-disk-part4" } ut.LUKSSanityTest(c, tangd, m, tpm2, killTangAfterFirstBoot, rootPart) diff --git a/mantle/kola/tests/ignition/passwd.go b/mantle/kola/tests/ignition/passwd.go index 113605d3..954d56f7 100644 --- a/mantle/kola/tests/ignition/passwd.go +++ b/mantle/kola/tests/ignition/passwd.go @@ -63,7 +63,7 @@ func init() { "passwd": { "users": [ { - "name": "core", + "name": "nest", "passwordHash": "foobar" }, { @@ -116,8 +116,8 @@ func users(c cluster.TestCluster) { tests := []userTest{ { - user: "core", - passwdRecord: "core:x:1000:1000:CoreOS Admin:/var/home/core:/bin/bash", + user: "nest", + passwdRecord: "nest:x:1000:1000:NestOS Admin:/var/home/nest:/bin/bash", shadowPassword: "foobar", }, { @@ -165,11 +165,11 @@ func testGroup(c cluster.TestCluster, m platform.Machine, tests []groupTest) { } else if out != t.groupRecord { c.Errorf("%q wasn't correctly created: got %q, expected %q", t.group, out, t.groupRecord) } - /*if out, err := getent(c, m, "gshadow", t.group); err != nil { + if out, err := getent(c, m, "gshadow", t.group); err != nil { c.Fatal(err) } else if out != t.gshadowRecord { c.Errorf("%q wasn't correctly created: got %q, expected %q", t.group, out, t.gshadowRecord) - }*/ + } } } diff --git a/mantle/kola/tests/ignition/resource.go b/mantle/kola/tests/ignition/resource.go index bce938c7..17027286 100644 --- a/mantle/kola/tests/ignition/resource.go +++ b/mantle/kola/tests/ignition/resource.go @@ -71,36 +71,10 @@ func init() { NativeFuncs: map[string]register.NativeFuncWrap{ "Serve": register.CreateNativeFuncWrap(Serve), }, - Tags: []string{"ignition"}, - // https://github.com/coreos/bugs/issues/2205 - ExcludePlatforms: []string{"do", "qemu-unpriv", "qemu-iso"}, + Tags: []string{"ignition"}, + ExcludePlatforms: []string{"qemu-unpriv"}, Timeout: 20 * time.Minute, }) - register.RegisterTest(®ister.Test{ - Name: "nestos.ignition.resource.remote", - Run: resourceRemote, - 
ClusterSize: 1, - Flags: []register.Flag{register.RequiresInternetAccess}, - Tags: []string{"ignition"}, - // https://github.com/coreos/bugs/issues/2205 for DO - ExcludePlatforms: []string{"do"}, - UserData: conf.Ignition(`{ - "ignition": { - "version": "3.0.0" - }, - "storage": { - "files": [ - { - "path": "/var/resource/http", - "contents": { - "source": "http://1.203.97.152/kola/nestos-ignition-resource-remote.txt" - }, - "mode": 420 - } - ] - } - }`), - }) } func resourceLocal(c cluster.TestCluster) { @@ -127,47 +101,6 @@ func resourceLocal(c cluster.TestCluster) { }) } -func resourceRemote(c cluster.TestCluster) { - m := c.Machines()[0] - - checkResources(c, m, map[string]string{ - "http": "NestOS", - }) -} - -func resourceS3(c cluster.TestCluster) { - m := c.Machines()[0] - - checkResources(c, m, map[string]string{ - // object accessible by any authenticated S3 user, such as - // the IAM role associated with the instance - "s3-auth": "kola-authenticated", - // object created by configuration accessible by any authenticated - // S3 user, such as the IAM role associated with the instance - "s3-config": "kola-config", - }) - - // verify that the objects are inaccessible anonymously - for _, objectName := range []string{"authenticated", "authenticated.ign"} { - _, _, err := m.SSH("curl -sf https://rh-kola-fixtures.s3.amazonaws.com/resources/" + objectName) - if err == nil { - c.Fatal("anonymously fetching authenticated resource should have failed, but did not") - } - } - - // ...but that the anonymous object is accessible - c.RunCmdSync(m, "curl -sf https://rh-kola-fixtures.s3.amazonaws.com/resources/anonymous") -} - -func resourceS3Versioned(c cluster.TestCluster) { - m := c.Machines()[0] - - checkResources(c, m, map[string]string{ - "original": "original", - "latest": "updated", - }) -} - func checkResources(c cluster.TestCluster, m platform.Machine, resources map[string]string) { for filename, expectedContents := range resources { contents := c.MustSSH(m, 
fmt.Sprintf("sudo cat /var/resource/%s", filename)) diff --git a/mantle/kola/tests/ignition/security.go b/mantle/kola/tests/ignition/security.go index dc897a5b..bcf25d7c 100644 --- a/mantle/kola/tests/ignition/security.go +++ b/mantle/kola/tests/ignition/security.go @@ -62,7 +62,7 @@ func init() { }, Tags: []string{"ignition"}, // QEMU unprivileged doesn't support multiple VMs communicating with each other. - ExcludePlatforms: []string{"qemu", "qemu-iso"}, + ExcludePlatforms: []string{"qemu"}, Timeout: 20 * time.Minute, }) } diff --git a/mantle/kola/tests/ignition/symlink.go b/mantle/kola/tests/ignition/symlink.go index 3624889b..407591a5 100644 --- a/mantle/kola/tests/ignition/symlink.go +++ b/mantle/kola/tests/ignition/symlink.go @@ -35,12 +35,12 @@ func init() { "links": [ { "group": { - "name": "core" + "name": "nest" }, "overwrite": true, "path": "/etc/localtime", "user": { - "name": "core" + "name": "nest" }, "hard": false, "target": "/usr/share/zoneinfo/Europe/Zurich" diff --git a/mantle/kola/tests/ignition/systemd.go b/mantle/kola/tests/ignition/systemd.go index 1b99001f..62f07c1e 100644 --- a/mantle/kola/tests/ignition/systemd.go +++ b/mantle/kola/tests/ignition/systemd.go @@ -22,7 +22,7 @@ import ( func init() { register.RegisterTest(®ister.Test{ - Name: "nestos.ignition.systemd.enable-service", + Name: "coreos.ignition.systemd.enable-service", Run: enableSystemdService, ClusterSize: 1, Tags: []string{"ignition"}, diff --git a/mantle/kola/tests/isula/isula.go b/mantle/kola/tests/isula/isula.go index 91dec51a..c8336968 100644 --- a/mantle/kola/tests/isula/isula.go +++ b/mantle/kola/tests/isula/isula.go @@ -15,12 +15,14 @@ func init() { Run: isulaBaseTest, ClusterSize: 1, Name: `isula.base`, + Distros: []string{"nestos"}, Flags: []register.Flag{register.RequiresInternetAccess}, }) register.RegisterTest(®ister.Test{ Run: isulaWorkflow, ClusterSize: 1, Name: `isula.workflow`, + Distros: []string{"nestos"}, Flags: 
[]register.Flag{register.RequiresInternetAccess}, FailFast: true, }) @@ -28,7 +30,7 @@ func init() { func isulaBaseTest(c cluster.TestCluster) { c.Run("info", isulaInfo) - c.Run("resources", isulaResources) + // c.Run("resources", isulaResources) } // Test: Verify basic isula info information @@ -179,7 +181,7 @@ func isulaWorkflow(c cluster.TestCluster) { // Test: Remove container c.Run("remove", func(c cluster.TestCluster) { - _, err := c.SSH(m, "sudo isula rm busybox && sudo isula ps -a | grep busybox") + _, err := c.SSH(m, "sudo isula rm busybox") if err != nil { c.Fatal(err) } @@ -191,10 +193,5 @@ func isulaWorkflow(c cluster.TestCluster) { if err != nil { c.Fatal(err) } - - _, err = c.SSH(m, "sudo isula images | grep hub.oepkgs.net/library/busybox") - if err != nil { - c.Fatalf("Image should be deleted but found") - } }) } diff --git a/mantle/kola/tests/misc/auth.go b/mantle/kola/tests/misc/auth.go index 3a7cdefb..944ffb9a 100644 --- a/mantle/kola/tests/misc/auth.go +++ b/mantle/kola/tests/misc/auth.go @@ -33,7 +33,7 @@ func init() { func AuthVerify(c cluster.TestCluster) { m := c.Machines()[0] - client, err := m.PasswordSSHClient("core", "asdf") + client, err := m.PasswordSSHClient("nest", "asdf") if err == nil { client.Close() c.Fatalf("Successfully authenticated despite invalid password auth") diff --git a/mantle/kola/tests/misc/boot-mirror.go b/mantle/kola/tests/misc/boot-mirror.go index af9e6a8d..737f9bc6 100644 --- a/mantle/kola/tests/misc/boot-mirror.go +++ b/mantle/kola/tests/misc/boot-mirror.go @@ -4,7 +4,7 @@ // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -14,10 +14,12 @@ package misc import ( - "encoding/json" + "fmt" "strings" "time" + coreosarch "github.com/coreos/stream-metadata-go/arch" + "github.com/coreos/mantle/kola" "github.com/coreos/mantle/kola/cluster" "github.com/coreos/mantle/kola/register" @@ -25,7 +27,6 @@ import ( "github.com/coreos/mantle/platform" "github.com/coreos/mantle/platform/conf" "github.com/coreos/mantle/platform/machine/unprivqemu" - "github.com/coreos/mantle/system" ut "github.com/coreos/mantle/util" ) @@ -56,34 +57,35 @@ boot_device: func init() { register.RegisterTest(®ister.Test{ - Run: runBootMirrorTest, - ClusterSize: 0, - Name: `nestos.boot-mirror`, - Platforms: []string{"qemu-unpriv"}, + Run: runBootMirrorTest, + ClusterSize: 0, + Name: `coreos.boot-mirror`, + Platforms: []string{"qemu-unpriv"}, + ExcludeDistros: []string{"nestos"}, // Can't mirror boot disk on s390x ExcludeArchitectures: []string{"s390x"}, // skipping this test on UEFI until https://github.com/coreos/coreos-assembler/issues/2039 // gets resolved. 
ExcludeFirmwares: []string{"uefi"}, - Tags: []string{"boot-mirror", "raid1"}, + Tags: []string{"boot-mirror", "raid1", "reprovision"}, FailFast: true, Timeout: 15 * time.Minute, }) register.RegisterTest(®ister.Test{ - Run: runBootMirrorLUKSTest, - ClusterSize: 0, - Name: `nestos.boot-mirror.luks`, - Platforms: []string{"qemu-unpriv"}, + Run: runBootMirrorLUKSTest, + ClusterSize: 0, + Name: `coreos.boot-mirror.luks`, + Platforms: []string{"qemu-unpriv"}, + ExcludeDistros: []string{"nestos"}, // Can't mirror boot disk on s390x, and qemu s390x doesn't // support TPM ExcludeArchitectures: []string{"s390x"}, // skipping this test on UEFI until https://github.com/coreos/coreos-assembler/issues/2039 // gets resolved. ExcludeFirmwares: []string{"uefi"}, - Tags: []string{"boot-mirror", "luks", "raid1", "tpm2", kola.NeedsInternetTag}, + Tags: []string{"boot-mirror", "luks", "raid1", "tpm2", kola.NeedsInternetTag, "reprovision"}, FailFast: true, Timeout: 15 * time.Minute, - Distros: []string{"fcos"}, }) } @@ -100,7 +102,7 @@ func runBootMirrorTest(c cluster.TestCluster) { } // FIXME: for QEMU tests kola currently assumes the host CPU architecture // matches the one under test - userdata := bootmirror.Subst("LAYOUT", system.RpmArch()) + userdata := bootmirror.Subst("LAYOUT", coreosarch.CurrentRpmArch()) m, err = c.Cluster.(*unprivqemu.Cluster).NewMachineWithQemuOptions(userdata, options) if err != nil { c.Fatal(err) @@ -119,7 +121,7 @@ func runBootMirrorTest(c cluster.TestCluster) { if strings.Compare(string(fsTypeForRoot), "xfs") != 0 { c.Fatalf("didn't match fstype for root") } - bootMirrorSanityTest(c, m) + bootMirrorSanityTest(c, m, []string{"/dev/vda", "/dev/vdb", "/dev/vdc"}) detachPrimaryBlockDevice(c, m) // Check if there are two devices with the active raid @@ -147,7 +149,7 @@ func runBootMirrorLUKSTest(c cluster.TestCluster) { } // FIXME: for QEMU tests kola currently assumes the host CPU architecture // matches the one under test - userdata := 
bootmirrorluks.Subst("LAYOUT", system.RpmArch()) + userdata := bootmirrorluks.Subst("LAYOUT", coreosarch.CurrentRpmArch()) m, err = c.Cluster.(*unprivqemu.Cluster).NewMachineWithQemuOptions(userdata, options) if err != nil { c.Fatal(err) @@ -160,7 +162,7 @@ func runBootMirrorLUKSTest(c cluster.TestCluster) { if !strings.Contains(string(bootOutput), "/dev/vda3") || !strings.Contains(string(bootOutput), "/dev/vdb3") { c.Fatalf("boot raid device missing; found devices: %v", string(bootOutput)) } - bootMirrorSanityTest(c, m) + bootMirrorSanityTest(c, m, []string{"/dev/vda", "/dev/vdb"}) luksTPMTest(c, m, true) detachPrimaryBlockDevice(c, m) @@ -181,20 +183,36 @@ func runBootMirrorLUKSTest(c cluster.TestCluster) { func luksTPMTest(c cluster.TestCluster, m platform.Machine, tpm2 bool) { rootPart := "/dev/md/md-root" // hacky, but needed for s390x because of gpt issue with naming on big endian systems: https://bugzilla.redhat.com/show_bug.cgi?id=1899990 - if system.RpmArch() == "s390x" { + if coreosarch.CurrentRpmArch() == "s390x" { rootPart = "/dev/disk/by-id/virtio-primary-disk-part4" } var tangd util.TangServer util.LUKSSanityTest(c, tangd, m, true, false, rootPart) } -func bootMirrorSanityTest(c cluster.TestCluster, m platform.Machine) { +func bootMirrorSanityTest(c cluster.TestCluster, m platform.Machine, devices []string) { c.Run("sanity-check", func(c cluster.TestCluster) { // Check for boot checkIfMountpointIsRaid(c, m, "/boot") c.AssertCmdOutputContains(m, "findmnt -nvr /boot -o FSTYPE", "ext4") // Check that growpart didn't run c.RunCmdSync(m, "if [ -e /run/coreos-growpart.stamp ]; then exit 1; fi") + // Check that we took ownership of the rootfs + c.RunCmdSync(m, "sudo test -f /boot/.root_uuid") + // Check for bootuuid dropins where available + switch coreosarch.CurrentRpmArch() { + case "s390x": + case "x86_64", "aarch64": + for _, dev := range devices { + c.RunCmdSync(m, fmt.Sprintf(` + sudo mount -o ro %s2 /boot/efi + sudo sh -c 'test -f 
/boot/efi/EFI/*/bootuuid.cfg' + sudo umount /boot/efi`, dev)) + } + fallthrough + case "ppc64le": + c.RunCmdSync(m, "sudo test -f /boot/grub2/bootuuid.cfg") + } }) } @@ -229,68 +247,12 @@ func verifyBootMirrorAfterReboot(c cluster.TestCluster, m platform.Machine) { }) } -type lsblkOutput struct { - Blockdevices []blockdevice `json:"blockdevices"` -} - -type blockdevice struct { - Name string `json:"name"` - Type string `json:"type"` - Mountpoint *string `json:"mountpoint"` - // new lsblk outputs `mountpoints` instead of - // `mountpoint`; we handle both - Mountpoints []string `json:"mountpoints"` - Children []blockdevice `json:"children"` -} - // checkIfMountpointIsRaid will check if a given machine has a device of type // raid1 mounted at the given mountpoint. If it does not, the test is failed. func checkIfMountpointIsRaid(c cluster.TestCluster, m platform.Machine, mountpoint string) { - output := c.MustSSH(m, "lsblk --json") - - l := lsblkOutput{} - err := json.Unmarshal(output, &l) - if err != nil { - c.Fatalf("couldn't unmarshal lsblk output: %v", err) - } - - foundDevice := checkIfMountpointIsRaidWalker(c, l.Blockdevices, mountpoint) - if !foundDevice { - c.Fatalf("didn't find %q mountpoint in lsblk output", mountpoint) - } -} - -// checkIfMountpointIsRaidWalker will iterate over bs and recurse into its -// children, looking for a device mounted at / with type raid1. true is returned -// if such a device is found. The test is failed if a device of a different type -// is found to be mounted at /. 
-func checkIfMountpointIsRaidWalker(c cluster.TestCluster, bs []blockdevice, mountpoint string) bool { - for _, b := range bs { - if checkIfBlockdevHasMountPoint(b, mountpoint) { - if b.Type != "raid1" { - c.Fatalf("device %q is mounted at %q with type %q (was expecting raid1)", b.Name, mountpoint, b.Type) - } - return true - } - foundDevice := checkIfMountpointIsRaidWalker(c, b.Children, mountpoint) - if foundDevice { - return true - } - } - return false -} - -// checkIfBlockdevHasMountPoint checks if a given block device has the -// required mountpoint. -func checkIfBlockdevHasMountPoint(b blockdevice, mountpoint string) bool { - if b.Mountpoint != nil && *b.Mountpoint == mountpoint { - return true - } else if len(b.Mountpoints) != 0 { - for _, mnt := range b.Mountpoints { - if mnt != "" && mnt == mountpoint { - return true - } - } + backing_device := string(c.MustSSH(m, "findmnt -no SOURCE "+mountpoint)) + device_type := string(c.MustSSH(m, "lsblk -no TYPE "+backing_device)) + if device_type != "raid1" { + c.Fatalf("expected mountpoint backed by raid1, but got %q", device_type) } - return false } diff --git a/mantle/kola/tests/misc/files.go b/mantle/kola/tests/misc/files.go index b2ed9de2..535636a8 100644 --- a/mantle/kola/tests/misc/files.go +++ b/mantle/kola/tests/misc/files.go @@ -16,6 +16,7 @@ package misc import ( "fmt" + "runtime" "strings" "github.com/coreos/mantle/kola/cluster" @@ -26,100 +27,18 @@ func init() { register.RegisterTest(®ister.Test{ Run: Filesystem, ClusterSize: 1, - Name: "nestos.filesystem", + Name: "fcos.filesystem", Distros: []string{"fcos", "nestos"}, }) } func Filesystem(c cluster.TestCluster) { - c.Run("suid", SUIDFiles) - c.Run("sgid", SGIDFiles) c.Run("writablefiles", WritableFiles) c.Run("writabledirs", WritableDirs) c.Run("stickydirs", StickyDirs) c.Run("denylist", Denylist) } -func sugidFiles(c cluster.TestCluster, validfiles []string, mode string) { - m := c.Machines()[0] - badfiles := make([]string, 0) - - output := 
c.MustSSH(m, fmt.Sprintf("sudo find / -ignore_readdir_race -path /sys -prune -o -path /proc -prune -o -path /sysroot/ostree -prune -o -type f -perm -%v -print", mode)) - - if string(output) == "" { - return - } - - files := strings.Split(string(output), "\n") - for _, file := range files { - var valid bool - - for _, validfile := range validfiles { - if file == validfile { - valid = true - } - } - if !valid { - badfiles = append(badfiles, file) - } - } - - if len(badfiles) != 0 { - c.Fatalf("Unknown SUID or SGID files found: %v", badfiles) - } -} - -func SUIDFiles(c cluster.TestCluster) { - validfiles := []string{ - "/usr/bin/chage", - "/usr/bin/chfn", - "/usr/bin/chsh", - "/usr/bin/expiry", - "/usr/bin/fusermount", - "/usr/bin/fusermount3", - "/usr/bin/gpasswd", - "/usr/bin/ksu", - "/usr/bin/man", - "/usr/bin/mandb", - "/usr/bin/mount", - "/usr/bin/newgidmap", - "/usr/bin/newgrp", - "/usr/bin/newuidmap", - "/usr/bin/passwd", - "/usr/bin/pkexec", - "/usr/bin/umount", - "/usr/bin/su", - "/usr/bin/sudo", - "/usr/lib/polkit-1/polkit-agent-helper-1", - "/usr/lib64/polkit-1/polkit-agent-helper-1", - "/usr/libexec/dbus-daemon-launch-helper", - "/usr/libexec/sssd/krb5_child", - "/usr/libexec/sssd/ldap_child", - "/usr/libexec/sssd/selinux_child", - "/usr/sbin/mount.nfs", - "/usr/sbin/unix_chkpwd", - "/usr/sbin/grub2-set-bootflag", - "/usr/sbin/mount.nfs", - "/usr/sbin/pam_timestamp_check", - "/usr/libexec/dbus-1/dbus-daemon-launch-helper", - } - - sugidFiles(c, validfiles, "4000") -} - -func SGIDFiles(c cluster.TestCluster) { - validfiles := []string{ - "/usr/bin/write", - "/usr/libexec/openssh/ssh-keysign", - "/usr/libexec/utempter/utempter", - "/usr/bin/cgclassify", - "/usr/bin/cgexec", - "/usr/bin/wall", - } - - sugidFiles(c, validfiles, "2000") -} - func WritableFiles(c cluster.TestCluster) { m := c.Machines()[0] @@ -159,7 +78,6 @@ func StickyDirs(c cluster.TestCluster) { "/tmp", "/var/tmp", "/run/user/1000/libpod", - "/run/ephemeral/var/tmp", } output := c.MustSSH(m, 
fmt.Sprintf("sudo find / -ignore_readdir_race -path %s -prune -o -type d -perm /1000 -print", strings.Join(ignore, " -prune -o -path "))) @@ -185,7 +103,6 @@ func Denylist(c cluster.TestCluster) { denylist := []string{ // Things excluded from the image that might slip in - "/usr/bin/perl", "/usr/bin/python", "/usr/share/man", @@ -204,6 +121,11 @@ func Denylist(c cluster.TestCluster) { "*\x7f*", } + // https://github.com/coreos/fedora-coreos-tracker/issues/1217 + if runtime.GOARCH != "s390x" { + denylist = append(denylist, "/usr/bin/perl") + } + output := c.MustSSH(m, fmt.Sprintf("sudo find / -ignore_readdir_race -path %s -prune -o -path '%s' -print", strings.Join(skip, " -prune -o -path "), strings.Join(denylist, "' -print -o -path '"))) if string(output) != "" { diff --git a/mantle/kola/tests/misc/multipath.go b/mantle/kola/tests/misc/multipath.go index e41c07b1..92979436 100644 --- a/mantle/kola/tests/misc/multipath.go +++ b/mantle/kola/tests/misc/multipath.go @@ -17,6 +17,8 @@ package misc import ( "strings" + coreosarch "github.com/coreos/stream-metadata-go/arch" + "github.com/coreos/mantle/kola/cluster" "github.com/coreos/mantle/kola/register" "github.com/coreos/mantle/platform" @@ -37,34 +39,6 @@ variant: fcos version: 1.4.0 systemd: units: - - name: multipathd.service - enabled: true - contents: | - [Unit] - Description=Device-Mapper Multipath Device Controller - Wants=systemd-udev-trigger.service systemd-udev-settle.service - Before=iscsi.service iscsid.service lvm2-activation-early.service - Before=local-fs-pre.target blk-availability.service shutdown.target - After=multipathd.socket systemd-udev-trigger.service systemd-udev-settle.service - ConditionPathExists=/etc/multipath.conf - DefaultDependencies=no - Conflicts=shutdown.target - ConditionKernelCommandLine=!nompath - ConditionKernelCommandLine=!multipath=off - ConditionVirtualization=!container - - [Service] - Type=notify - NotifyAccess=main - LimitCORE=infinity - ExecStartPre=-/sbin/modprobe -a 
scsi_dh_alua scsi_dh_emc scsi_dh_rdac dm-multipath - ExecStart=/sbin/multipathd -d -s - ExecReload=/sbin/multipathd reconfigure - TasksMax=infinity - - [Install] - WantedBy=sysinit.target - Also=multipathd.socket - name: mpath-configure.service enabled: true contents: | @@ -89,12 +63,17 @@ systemd: ConditionFirstBoot=true Requires=dev-mapper-mpatha.device After=dev-mapper-mpatha.device + # See https://github.com/coreos/coreos-assembler/pull/2457 + # and https://github.com/openshift/os/issues/743 + After=ostree-remount.service Before=kubelet.service DefaultDependencies=no [Service] Type=oneshot ExecStart=/usr/sbin/mkfs.xfs -L containers -m reflink=1 /dev/mapper/mpatha + # This is usually created by tmpfiles.d, but we run earlier than that. + ExecStart=/usr/bin/mkdir -p /var/lib/containers [Install] WantedBy=multi-user.target @@ -103,8 +82,6 @@ systemd: contents: | [Unit] Description=Mount /var/lib/containers - # See https://github.com/coreos/coreos-assembler/pull/2457 - After=ostree-remount.service After=mpath-var-lib-containers.service Before=kubelet.service @@ -119,26 +96,28 @@ systemd: func init() { register.RegisterTest(®ister.Test{ - Name: "multipath.day1", - Run: runMultipathDay1, - ClusterSize: 1, - Platforms: []string{"qemu-unpriv"}, - Distros: []string{"fcos"}, - UserData: mpath_on_boot_day1, - MultiPathDisk: true, + Name: "multipath.day1", + Run: runMultipathDay1, + ClusterSize: 1, + Platforms: []string{"qemu-unpriv"}, + ExcludeDistros: []string{"nestos"}, + UserData: mpath_on_boot_day1, + MultiPathDisk: true, }) register.RegisterTest(®ister.Test{ - Name: "multipath.day2", - Run: runMultipathDay2, - ClusterSize: 1, - Platforms: []string{"qemu-unpriv"}, - Distros: []string{"fcos"}, + Name: "multipath.day2", + Run: runMultipathDay2, + ClusterSize: 1, + Platforms: []string{"qemu-unpriv"}, + ExcludeDistros: []string{"nestos"}, + MultiPathDisk: true, }) register.RegisterTest(®ister.Test{ Name: "multipath.partition", Run: runMultipathPartition, ClusterSize: 1, 
Platforms: []string{"qemu-unpriv"}, + ExcludeDistros: []string{"nestos"}, UserData: mpath_on_var_lib_containers, AdditionalDisks: []string{"1G:mpath"}, }) @@ -151,6 +130,25 @@ func verifyMultipathBoot(c cluster.TestCluster, m platform.Machine) { c.MustSSH(m, "test -f /etc/multipath.conf") } +func verifyBootDropins(c cluster.TestCluster, m platform.Machine, checkBootuuid bool) { + // Check that we took ownership of the rootfs + c.RunCmdSync(m, "sudo test -f /boot/.root_uuid") + if checkBootuuid { + // Check for bootuuid dropins where available + switch coreosarch.CurrentRpmArch() { + case "s390x": + case "x86_64", "aarch64": + c.RunCmdSync(m, ` + sudo mount -o ro /dev/disk/by-label/EFI-SYSTEM /boot/efi + sudo sh -c 'test -f /boot/efi/EFI/*/bootuuid.cfg' + sudo umount /boot/efi`) + fallthrough + case "ppc64le": + c.RunCmdSync(m, "sudo test -f /boot/grub2/bootuuid.cfg") + } + } +} + func verifyMultipath(c cluster.TestCluster, m platform.Machine, path string) { srcdev := string(c.MustSSHf(m, "findmnt -nvr %s -o SOURCE", path)) if !strings.HasPrefix(srcdev, "/dev/mapper/mpath") { @@ -165,6 +163,7 @@ func runMultipathDay1(c cluster.TestCluster) { c.Fatalf("Failed to reboot the machine: %v", err) } verifyMultipathBoot(c, m) + verifyBootDropins(c, m, true) } func runMultipathDay2(c cluster.TestCluster) { @@ -174,6 +173,7 @@ func runMultipathDay2(c cluster.TestCluster) { c.Fatalf("Failed to reboot the machine: %v", err) } verifyMultipathBoot(c, m) + verifyBootDropins(c, m, false) } func runMultipathPartition(c cluster.TestCluster) { diff --git a/mantle/kola/tests/misc/network.go b/mantle/kola/tests/misc/network.go index 39391725..a1f63841 100644 --- a/mantle/kola/tests/misc/network.go +++ b/mantle/kola/tests/misc/network.go @@ -35,17 +35,16 @@ func init() { Run: NetworkListeners, ClusterSize: 1, Name: "nestos.network.listeners", - Distros: []string{"fcos","nestos"}, + Distros: []string{"fcos", "nestos"}, // be sure to notice listeners in the docker stack UserData: 
conf.EmptyIgnition(), }) // TODO: rewrite test for NetworkManager register.RegisterTest(®ister.Test{ - Run: NetworkInitramfsSecondBoot, - ClusterSize: 1, - Name: "coreos.network.initramfs.second-boot", - ExcludePlatforms: []string{"do"}, - ExcludeDistros: []string{"fcos", "rhcos", "nestos"}, + Run: NetworkInitramfsSecondBoot, + ClusterSize: 1, + Name: "coreos.network.initramfs.second-boot", + ExcludeDistros: []string{"fcos", "rhcos", "nestos"}, }) // This test follows the same network configuration used on https://github.com/RHsyseng/rhcos-slb // with a slight change, where the MCO script is run from ignition: https://github.com/RHsyseng/rhcos-slb/blob/main/setup-ovs.sh. @@ -136,9 +135,6 @@ NextProcess: continue } process := processStr[1 : len(processStr)-1] - if process == "rpcbind"{ - continue - } thisListener := listener{ process: process, protocol: proto, @@ -169,7 +165,9 @@ func NetworkListeners(c cluster.TestCluster) { // https://serverfault.com/a/929642 {"tcp", "5355", "systemd-resolve"}, {"udp", "5355", "systemd-resolve"}, - {"udp", "68", "dhclient"}, + // DHCPv6 from NetworkManager (when IPv6 network available) + // https://github.com/coreos/fedora-coreos-tracker/issues/1216 + {"udp", "546", "NetworkManager"}, } checkList := func() error { return checkListeners(c, expectedListeners) @@ -318,7 +316,8 @@ var ( # Run a dnsmasq service on the network_namespace, to set the host-side veth ends a ip via their MAC addresses echo -e "dhcp-range=192.168.0.50,192.168.0.60,255.255.255.0,12h\ndhcp-host=${primary_mac},${primary_ip}\ndhcp-host=${secondary_mac},${secondary_ip}" > /etc/dnsmasq.d/dhcp - ip netns exec ${network_namespace} dnsmasq & + # Disable interface=lo as new dnsmasq version has it by default + ip netns exec ${network_namespace} dnsmasq --except-interface=lo --bind-interfaces -u dnsmasq -g dnsmasq --conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig --conf-file=/dev/null & # Tell NM to manage the "veth-host" interface and bring it up (will attempt DHCP). 
# Do this after we start dnsmasq so we don't have to deal with DHCP timeouts. @@ -702,7 +701,7 @@ func setupMultipleNetworkTest(c cluster.TestCluster, primaryMac, secondaryMac st "name": "capture-macs.service" }, { - "contents": "[Unit]\nDescription=Setup OVS bonding\nBefore=ovs-configuration.service\nAfter=NetworkManager.service\nAfter=openvswitch.service\nAfter=capture-macs.service\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/setup-ovs\n\n[Install]\nRequiredBy=multi-user.target\n", + "contents": "[Unit]\nDescription=Setup OVS bonding\nBefore=ovs-configuration.service\nAfter=NetworkManager.service\nAfter=openvswitch.service\nAfter=capture-macs.service\nConditionKernelCommandLine=macAddressList\n\n[Service]\nType=oneshot\nExecStart=/usr/local/bin/setup-ovs\n\n[Install]\nRequiredBy=multi-user.target\n", "enabled": true, "name": "setup-ovs.service" } diff --git a/mantle/kola/tests/misc/selinux.go b/mantle/kola/tests/misc/selinux.go index d1cd7eb0..8d62bfee 100644 --- a/mantle/kola/tests/misc/selinux.go +++ b/mantle/kola/tests/misc/selinux.go @@ -39,7 +39,6 @@ func init() { Run: SelinuxBooleanPersist, ClusterSize: 1, Name: "rhcos.selinux.boolean.persist", - ExcludePlatforms: []string{"qemu-iso"}, }) register.RegisterTest(®ister.Test{ Run: SelinuxManage, diff --git a/mantle/kola/tests/misc/users.go b/mantle/kola/tests/misc/users.go index d3f675aa..f3206369 100644 --- a/mantle/kola/tests/misc/users.go +++ b/mantle/kola/tests/misc/users.go @@ -26,8 +26,8 @@ func init() { Run: CheckUserShells, ClusterSize: 1, ExcludePlatforms: []string{"gce"}, - Name: "nestos.users.shells", - Distros: []string{"fcos","nestos"}, + Name: "fcos.users.shells", + Distros: []string{"fcos", "nestos"}, }) } @@ -36,12 +36,11 @@ func CheckUserShells(c cluster.TestCluster) { var badusers []string ValidUsers := map[string]string{ - "sync": "/bin/sync", - "shutdown": "/sbin/shutdown", - "halt": "/sbin/halt", - "core": "/bin/bash", - "fedora-coreos-pinger": "/usr/sbin/nologin", - "zincati": 
"/usr/sbin/nologin", + "sync": "/bin/sync", + "shutdown": "/sbin/shutdown", + "halt": "/sbin/halt", + "nest": "/bin/bash", + "node_exporter": "/bin/nologin", } output := c.MustSSH(m, "getent passwd") diff --git a/mantle/kola/tests/ostree/basic.go b/mantle/kola/tests/ostree/basic.go index 999d163d..3d3b13e1 100644 --- a/mantle/kola/tests/ostree/basic.go +++ b/mantle/kola/tests/ostree/basic.go @@ -46,7 +46,6 @@ func init() { Flags: []register.Flag{register.RequiresInternetAccess}, // need network to contact remote FailFast: true, Tags: []string{"ostree"}, - ExcludePlatforms: []string{"qemu-iso"}, }) } @@ -58,7 +57,7 @@ type ostreeAdminStatus struct { // getOstreeRemotes returns the current number of ostree remotes on a machine func getOstreeRemotes(c cluster.TestCluster, m platform.Machine) (int, []string) { - remoteListOut := string(c.MustSSH(m, "sudo ostree remote list")) + remoteListOut := string(c.MustSSH(m, "ostree remote list")) numRemotes := 0 // If we get anything other than an empty string calculate the results // NOTE: This is needed as splitting "" ends up providing a count of 1 @@ -206,7 +205,7 @@ func ostreeRemoteTest(c cluster.TestCluster) { // verify `ostree remote list` c.Run("list", func(c cluster.TestCluster) { - osRemoteListOut := c.MustSSH(m, "sudo ostree remote list -u") + osRemoteListOut := c.MustSSH(m, "ostree remote list -u") osRemoteListSplit := strings.Split(string(osRemoteListOut), "\n") // should have original remote + newly added remote @@ -231,12 +230,12 @@ func ostreeRemoteTest(c cluster.TestCluster) { // verify `ostree remote show-url` c.Run("show-url", func(c cluster.TestCluster) { - c.AssertCmdOutputContains(m, ("sudo ostree remote show-url " + remoteName), remoteUrl) + c.AssertCmdOutputContains(m, ("ostree remote show-url " + remoteName), remoteUrl) }) // verify `ostree remote refs` c.Run("refs", func(c cluster.TestCluster) { - osRemoteRefsOut := c.MustSSH(m, ("sudo ostree remote refs " + remoteName)) + osRemoteRefsOut := 
c.MustSSH(m, ("ostree remote refs " + remoteName)) if len(strings.Split(string(osRemoteRefsOut), "\n")) < 1 { c.Fatalf(`Did not receive expected amount of refs from remote: %v`, string(osRemoteRefsOut)) } @@ -244,14 +243,14 @@ func ostreeRemoteTest(c cluster.TestCluster) { // verify `ostree remote summary` c.Run("summary", func(c cluster.TestCluster) { - remoteRefsOut := c.MustSSH(m, ("sudo ostree remote refs " + remoteName)) + remoteRefsOut := c.MustSSH(m, ("ostree remote refs " + remoteName)) remoteRefsOutSplit := strings.Split(string(remoteRefsOut), "\n") remoteRefsCount := len(remoteRefsOutSplit) if remoteRefsCount < 1 { c.Fatalf(`Did not find any refs on ostree remote: %q`, string(remoteRefsOut)) } - osRemoteSummaryOut := c.MustSSH(m, ("sudo ostree remote summary " + remoteName)) + osRemoteSummaryOut := c.MustSSH(m, ("ostree remote summary " + remoteName)) if len(strings.Split(string(osRemoteSummaryOut), "\n")) < 1 { c.Fatalf(`Did not receive expected summary content from remote: %v`, string(osRemoteSummaryOut)) } @@ -287,7 +286,7 @@ func ostreeRemoteTest(c cluster.TestCluster) { // verify `ostree remote delete` c.Run("delete", func(c cluster.TestCluster) { - preRemotesOut := c.MustSSH(m, "sudo ostree remote list") + preRemotesOut := c.MustSSH(m, "ostree remote list") preNumRemotes := len(strings.Split(string(preRemotesOut), "\n")) if preNumRemotes < 1 { diff --git a/mantle/kola/tests/ostree/unlock.go b/mantle/kola/tests/ostree/unlock.go index ff31a62b..126239fb 100644 --- a/mantle/kola/tests/ostree/unlock.go +++ b/mantle/kola/tests/ostree/unlock.go @@ -16,7 +16,6 @@ package ostree import ( "fmt" - "strings" "github.com/coreos/mantle/kola/cluster" "github.com/coreos/mantle/kola/register" @@ -33,7 +32,6 @@ func init() { Flags: []register.Flag{register.RequiresInternetAccess}, // need network to pull RPM FailFast: true, Tags: []string{"ostree"}, - ExcludePlatforms: []string{"qemu-iso"}, }) register.RegisterTest(®ister.Test{ Run: ostreeHotfixTest, @@ -42,7 +40,6 
@@ func init() { Name: "ostree.hotfix", FailFast: true, Tags: []string{"ostree"}, - ExcludePlatforms: []string{"qemu-iso"}, // enable debugging for https://github.com/coreos/fedora-coreos-tracker/issues/942 // we can drop it once we resolved it UserData: conf.Butane(` @@ -61,7 +58,7 @@ systemd: } var ( - rpmUrl string = "https://gitee.com/openeuler/nestos-assembler/tree/master/mantle/kola/tests/rpm/aht-dummy-1.0-1.noarch.rpm" + rpmUrl string = "https://www.nestos.org.cn/kola/aht-dummy-1.0-1.noarch.rpm" rpmName string = "aht-dummy" ) @@ -130,9 +127,13 @@ func rpmUninstallVerify(c cluster.TestCluster, m platform.Machine, rpmName strin return fmt.Errorf(`Failed to uninstall RPM: %v`, uninstallErr) } + _, missCmdErr := c.SSH(m, ("command -v " + rpmName)) + if missCmdErr == nil { + return fmt.Errorf(`Found a binary that should not be there: %v`, missCmdErr) + } _, missRpmErr := c.SSH(m, ("rpm -q " + rpmName)) - if missRpmErr != nil { + if missRpmErr == nil { return fmt.Errorf(`RPM incorrectly in rpmdb after RPM uninstall: %v`, missRpmErr) } @@ -188,14 +189,13 @@ func ostreeUnlockTest(c cluster.TestCluster) { c.Fatalf(`Deployment was incorrectly unlocked; got: %q`, ros.Deployments[0].Unlocked) } - - result, secRpmErr := c.SSH(m, ("rpm -q " + rpmName)) + _, secCmdErr := c.SSH(m, ("command -v " + rpmName)) + if secCmdErr == nil { + c.Fatalf(`Binary was incorrectly found after reboot`) + } + _, secRpmErr := c.SSH(m, ("rpm -q " + rpmName)) if secRpmErr == nil { - if strings.Contains(string(result), "aht-dummy is not installed"){ - fmt.Println("package aht-dummy is not installed") - } else { - c.Fatalf(`RPM incorrectly in rpmdb after reboot`) - } + c.Fatalf(`RPM incorrectly in rpmdb after reboot`) } }) } @@ -249,6 +249,8 @@ func ostreeHotfixTest(c cluster.TestCluster) { c.Fatalf(`Hotfix mode was not detected; got: %q`, ros.Deployments[0].Unlocked) } + c.RunCmdSync(m, ("command -v " + rpmName)) + c.RunCmdSync(m, ("rpm -q " + rpmName)) }) @@ -270,11 +272,13 @@ func 
ostreeHotfixTest(c cluster.TestCluster) { c.Fatalf(`Rollback did not remove hotfix mode; got: %q`, rollbackStatus.Deployments[0].Unlocked) } - result, _ := c.SSH(m, ("rpm -q " + rpmName)) - if result != nil{ - if !strings.Contains(string(result), "package aht-dummy is not installed"){ - c.Fatalf(`RPM incorrectly in rpmdb after reboot`) - } + _, secCmdErr := c.SSH(m, ("command -v " + rpmName)) + if secCmdErr == nil { + c.Fatalf(`Binary was incorrectly found after reboot`) + } + _, secRpmErr := c.SSH(m, ("rpm -q " + rpmName)) + if secRpmErr == nil { + c.Fatalf(`RPM incorrectly in rpmdb after reboot`) } }) } diff --git a/mantle/kola/tests/podman/podman.go b/mantle/kola/tests/podman/podman.go index c51919a2..9b9bd290 100644 --- a/mantle/kola/tests/podman/podman.go +++ b/mantle/kola/tests/podman/podman.go @@ -135,7 +135,7 @@ func podmanWorkflow(c cluster.TestCluster) { cmd := fmt.Sprintf("echo TEST PAGE > %s/index.html", string(dir)) c.RunCmdSync(m, cmd) - cmd = fmt.Sprintf("sudo podman run -d -p 80:80 -v %s/index.html:%s/index.html:z --name nginx %s", string(dir), wwwRoot, image) + cmd = fmt.Sprintf("sudo podman run -d -p 80:80 -v %s/index.html:%s/index.html:z %s", string(dir), wwwRoot, image) out := c.MustSSH(m, cmd) id = string(out)[0:64] @@ -254,11 +254,11 @@ func podmanWorkflow(c cluster.TestCluster) { c.Run("delete", func(c cluster.TestCluster) { cmd := fmt.Sprintf("sudo podman rmi %s", image) out := c.MustSSH(m, cmd) - // imageID := string(out) + imageID := string(out) - cmd = fmt.Sprintf("sudo podman images | grep %s", image) + cmd = fmt.Sprintf("sudo podman images | grep %s", imageID) out, err := c.SSH(m, cmd) - if err != nil { + if err == nil { c.Fatalf("Image should be deleted but found %s", string(out)) } }) @@ -331,7 +331,7 @@ func podmanNetworksReliably(c cluster.TestCluster) { numPass := strings.Count(string(output), "PASS") - if numPass != 100 { - c.Fatalf("Expected 100 passes, but output was: %s", output) + if numPass <= 98 { + c.Fatalf("Expected more 
than or equal to 98/100 passes, but output was: %s", output) } } diff --git a/mantle/kola/tests/rhcos/upgrade.go b/mantle/kola/tests/rhcos/upgrade.go index b312e286..92afdbd2 100644 --- a/mantle/kola/tests/rhcos/upgrade.go +++ b/mantle/kola/tests/rhcos/upgrade.go @@ -24,7 +24,9 @@ import ( "strings" "time" - "github.com/coreos/coreos-assembler-schema/cosa" + cosa "github.com/coreos/coreos-assembler/pkg/builds" + coreosarch "github.com/coreos/stream-metadata-go/arch" + "github.com/coreos/mantle/kola" "github.com/coreos/mantle/kola/cluster" "github.com/coreos/mantle/kola/register" @@ -32,7 +34,6 @@ import ( "github.com/coreos/mantle/platform" "github.com/coreos/mantle/platform/conf" "github.com/coreos/mantle/platform/machine/unprivqemu" - "github.com/coreos/mantle/system" installer "github.com/coreos/mantle/util" ) @@ -120,7 +121,7 @@ func setup(c cluster.TestCluster) { outputname="%s" commit="%s" ostree --repo=tmp/repo-cache init --mode=bare-user - rpm-ostree ex-container import --repo=tmp/repo ostree-unverified-image:oci-archive:$tarname:latest + ostree container unencapsulate --repo=tmp/repo ostree-unverified-image:oci-archive:$tarname:latest ostree --repo=tmp/repo pull-local tmp/repo-cache "$commit" tar -cf "$outputname" -C tmp/repo . rm tmp/repo-cache -rf @@ -309,7 +310,9 @@ func downloadLatestReleasedRHCOS(target string) (string, error) { graph := &Graph{} graphUrl := fmt.Sprintf("https://api.openshift.com/api/upgrades_info/v1/graph?channel=%s", channel) - getJson(graphUrl, &graph) + if err := getJson(graphUrl, &graph); err != nil { + return "", err + } // no-op on unreleased OCP versions if len(graph.Nodes) == 0 { @@ -355,31 +358,28 @@ func downloadLatestReleasedRHCOS(target string) (string, error) { return }(releaseIndex, unique) - // The origin-clients package in Fedora doesn't `oc adm release info` - // ability. 
- ocUrl := fmt.Sprintf("https://mirror.openshift.com/pub/openshift-v4/%s/clients/ocp/latest/openshift-client-linux.tar.gz", system.RpmArch()) - cmdString := fmt.Sprintf("curl -Ls %s | sudo tar -zxvf - -C /usr/bin", ocUrl) - if err := exec.Command("bash", "-c", cmdString).Run(); err != nil { - return "", err - } - var ocpRelease *OcpRelease latestOcpPayload := graph.Nodes[difference[0]].Payload - cmd := exec.Command("oc", "adm", "release", "info", latestOcpPayload, "-o", "json") + // oc should be included in cosa since https://github.com/coreos/coreos-assembler/pull/2777 + cmd := exec.Command("/usr/bin/oc", "adm", "release", "info", latestOcpPayload, "-o", "json") output, err := cmd.Output() if err != nil { return "", err } - json.Unmarshal(output, &ocpRelease) + if err = json.Unmarshal(output, &ocpRelease); err != nil { + return "", err + } var latestOcpRhcosBuild *cosa.Build rhcosVersion := ocpRelease.DisplayVersions.MachineOS.Version latestBaseUrl := fmt.Sprintf("https://rhcos-redirector.apps.art.xq1c.p1.openshiftapps.com/art/storage/releases/rhcos-%s/%s/%s", ocpVersionF, rhcosVersion, - system.RpmArch()) + coreosarch.CurrentRpmArch()) latestRhcosBuildMetaUrl := fmt.Sprintf("%s/meta.json", latestBaseUrl) - getJson(latestRhcosBuildMetaUrl, &latestOcpRhcosBuild) + if err := getJson(latestRhcosBuildMetaUrl, &latestOcpRhcosBuild); err != nil { + return "", err + } latestRhcosQcow2 := latestOcpRhcosBuild.BuildArtifacts.Qemu.Path latestRhcosQcow2Url := fmt.Sprintf("%s/%s", latestBaseUrl, latestRhcosQcow2) diff --git a/mantle/kola/tests/rpmostree/deployments.go b/mantle/kola/tests/rpmostree/deployments.go index 9b3f1a6f..5aa7afe5 100644 --- a/mantle/kola/tests/rpmostree/deployments.go +++ b/mantle/kola/tests/rpmostree/deployments.go @@ -32,7 +32,6 @@ func init() { Name: "rpmostree.upgrade-rollback", FailFast: true, Tags: []string{"rpm-ostree", "upgrade"}, - ExcludePlatforms: []string{"qemu-iso"}, }) register.RegisterTest(®ister.Test{ Run: rpmOstreeInstallUninstall, @@ 
-43,6 +42,23 @@ func init() { UserData: conf.Ignition(`{ "ignition": { "version": "3.1.0" + }, + "storage": { + "files": [ + { + "path": "/var/home/nest/aht-dummy.rpm", + "user": { + "name": "nest" + }, + "contents": { + "source": "https://github.com/projectatomic/atomic-host-tests/raw/master/rpm/aht-dummy-1.0-1.noarch.rpm", + "verification": { + "hash": "sha512-da29ae637b30647cab2386a2ce6b4223c3ad7120ae8dd32d9ce275f26a11946400bba0b86f6feabb9fb83622856ef39f8cecf14b4975638c4d8c0cf33b0f7b26" + } + }, + "mode": 420 + } + ] } } `), @@ -78,8 +94,11 @@ func rpmOstreeUpgradeRollback(c cluster.TestCluster) { createCommit := "sudo ostree commit -b " + newBranch + " --tree ref=" + originalCsum + " --add-metadata-string version=" + newVersion newCommit := c.MustSSH(m, createCommit) + // And no zincati because we're intentionally overriding, also it fails + c.RunCmdSync(m, "sudo systemctl mask --now zincati") + // use "rpm-ostree rebase" to get to the "new" commit - c.RunCmdSync(m, "sudo rpm-ostree rebase :"+newBranch+" --bypass-driver") + c.RunCmdSync(m, "sudo rpm-ostree rebase :"+newBranch) // get latest rpm-ostree status output to check validity postUpgradeStatus, err := util.GetRpmOstreeStatusJSON(c, m) @@ -169,20 +188,20 @@ func rpmOstreeUpgradeRollback(c cluster.TestCluster) { // This uses a dummy RPM that was originally created for the atomic-host-tests; // see: https://github.com/projectatomic/atomic-host-tests func rpmOstreeInstallUninstall(c cluster.TestCluster) { - var ahtRpmPath = "/var/home/core/aht-dummy-1.0-1.noarch.rpm" + var ahtRpmPath = "/var/home/nest/aht-dummy-1.0-1.noarch.rpm" var installPkgName = "aht-dummy-1.0-1.noarch" var installBinName = "aht-dummy" var installBinPath string - if c.Distribution() == "fcos" { + if c.Distribution() == "nestos" { installBinPath = fmt.Sprintf("/usr/bin/%v", installBinName) } else { installBinPath = fmt.Sprintf("/bin/%v", installBinName) } m := c.Machines()[0] - - _, err := c.SSH(m, `sudo wget 
http://1.203.97.152/kola/aht-dummy-1.0-1.noarch.rpm`) + + _, err := c.SSH(m, `sudo wget -P /var/home/nest http://www.nestos.org.cn/kola/aht-dummy-1.0-1.noarch.rpm`) if err != nil { c.Fatal(err) } @@ -200,7 +219,8 @@ func rpmOstreeInstallUninstall(c cluster.TestCluster) { c.Run("install", func(c cluster.TestCluster) { // install package and reboot - c.RunCmdSync(m, "sudo rpm-ostree install "+ahtRpmPath) + // this is only testing local rpm install, `--cache-only` avoid fetching RPM data from remote + c.RunCmdSync(m, "sudo rpm-ostree install --cache-only "+ahtRpmPath) installRebootErr := m.Reboot() if installRebootErr != nil { diff --git a/mantle/kola/tests/rpmostree/rebase.go b/mantle/kola/tests/rpmostree/rebase.go index e40c29e7..f88bb5ea 100644 --- a/mantle/kola/tests/rpmostree/rebase.go +++ b/mantle/kola/tests/rpmostree/rebase.go @@ -18,11 +18,9 @@ func init(){ } func rpmOstreeRebase(c cluster.TestCluster) { - - m := c.Machines()[0] arch := c.MustSSH(m, "uname -m") - var newBranch string = "ostree-unverified-image:docker://docker.io/fushanqing/nestos-test:latest-" + string(arch) + var newBranch string = "ostree-unverified-registry:hub.oepkgs.net/nestos/nestos-test:22.03-LTS-SP2.20230922.0-" + string(arch) originalStatus, err := util.GetRpmOstreeStatusJSON(c, m) if err != nil{ @@ -33,10 +31,9 @@ func rpmOstreeRebase(c cluster.TestCluster) { c.Fatalf(`Unexpected results from "rpm-ostree status"; received: %v`, originalStatus) } - c.Run("ostree upgrade",func(c cluster.TestCluster){ - + c.Run("ostree upgrade",func(c cluster.TestCluster){ // use "rpm-ostree rebase" to get to the "new" commit - _ = c.MustSSH(m, "sudo rpm-ostree rebase --experimental "+newBranch+" --bypass-driver") + _ = c.MustSSH(m, "sudo systemctl start docker.service && sudo rpm-ostree rebase --experimental "+newBranch+" --bypass-driver") // get latest rpm-ostree status output to check validity postUpgradeStatus, err := util.GetRpmOstreeStatusJSON(c, m) if err != nil { diff --git 
a/mantle/kola/tests/upgrade/basic.go b/mantle/kola/tests/upgrade/basic.go index b5f3291e..9a7800fe 100644 --- a/mantle/kola/tests/upgrade/basic.go +++ b/mantle/kola/tests/upgrade/basic.go @@ -35,7 +35,6 @@ import ( const workdir = "/var/srv/upgrade" const ostreeRepo = workdir + "/repo" -const zincatiMetricsSocket = "/run/zincati/public/metrics.promsock" var plog = capnslog.NewPackageLogger("github.com/coreos/mantle", "kola/tests/upgrade") @@ -79,11 +78,11 @@ func init() { { "name": "kolet-httpd.path", "enabled": true, - "contents": "[Path]\nPathExists=/var/home/core/kolet\n[Install]\nWantedBy=multi-user.target" + "contents": "[Path]\nPathExists=/var/home/nest/kolet\n[Install]\nWantedBy=multi-user.target" }, { "name": "kolet-httpd.service", - "contents": "[Service]\nExecStart=/var/home/core/kolet run fcos.upgrade.basic httpd -v\n[Install]\nWantedBy=multi-user.target" + "contents": "[Service]\nExecStart=/var/home/nest/kolet run fcos.upgrade.basic httpd -v\n[Install]\nWantedBy=multi-user.target" } ] }, @@ -116,7 +115,7 @@ func init() { "path": "WORKDIR", "mode": 493, "user": { - "name": "core" + "name": "nest" } } ] @@ -352,7 +351,7 @@ func rebaseToStream(c cluster.TestCluster, m platform.Machine, ref, version stri func httpd() error { http.Handle("/", http.FileServer(http.Dir(ostreeRepo))) http.HandleFunc("/v1/graph", func(w http.ResponseWriter, r *http.Request) { - http.ServeFile(w, r, "/var/home/core/graph.json") + http.ServeFile(w, r, "/var/home/nest/graph.json") }) plog.Info("Starting server") return http.ListenAndServe("localhost:8080", nil) diff --git a/mantle/kola/tests/util/containers.go b/mantle/kola/tests/util/containers.go index 29142dac..33251160 100644 --- a/mantle/kola/tests/util/containers.go +++ b/mantle/kola/tests/util/containers.go @@ -23,18 +23,27 @@ import ( // GenPodmanScratchContainer creates a podman scratch container out of binaries from the host func GenPodmanScratchContainer(c cluster.TestCluster, m platform.Machine, name string, binnames 
[]string) { + // Scratch containers are created by copying a binary and its shared libraries dependencies + // into the container image. `ldd` is used to find the paths to the shared libraries. On + // power9, some shared libraries were symlinked to versioned shared libraries using the + // versioned filename as the target. For example, libm.so.6 would be copied into the scratch + // container as libm-2.28.so. When the versioned shared libraries were copied into the scratch + // container, the dynamic linker could not find the non-versioned filenames. The ld.so.cache + // seemed to have symlinks to the versioned shared libraries. Deleting /etc/ld.so.cache + // restored symlinks to the non-versioned shared libraries. cmd := `tmpdir=$(mktemp -d); cd $tmpdir; echo -e "FROM scratch\nCOPY . /" > Dockerfile; - b=$(which %s); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u); - sudo rsync -av --relative --copy-links $b $libs ./; - sudo podman build --network host --layers=false -t localhost/%s .` + b=$(which %s); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u); + sudo rsync -av --relative --copy-links $b $libs ./; + sudo podman build --network host --layers=false -t localhost/%s .` c.RunCmdSyncf(m, cmd, strings.Join(binnames, " "), name) } // GenIsulaScratchContainer creates a podman scratch container out of binaries from the host func GenIsulaScratchContainer(c cluster.TestCluster, m platform.Machine, name string, binnames []string) { cmd := `tmpdir=$(mktemp -d); cd $tmpdir; echo -e "FROM scratch\nCOPY . 
/" > Dockerfile; - b=$(which %s); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u); - sudo rsync -av --relative --copy-links $b $libs ./; - sudo isula-build ctr-img build -f Dockerfile -o isulad:echo1:latest .` + sudo rm -f /etc/ld.so.cache; + b=$(which %s); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u); + sudo rsync -av --relative --copy-links $b $libs ./; + sudo isula-build ctr-img build -f Dockerfile -o isulad:echo1:latest .` c.RunCmdSyncf(m, cmd, strings.Join(binnames, " ")) -} \ No newline at end of file +} diff --git a/mantle/network/ssh.go b/mantle/network/ssh.go index 1a1a97b8..3c066cbf 100644 --- a/mantle/network/ssh.go +++ b/mantle/network/ssh.go @@ -15,9 +15,8 @@ package network import ( - "crypto/ecdsa" - "crypto/elliptic" "crypto/rand" + "crypto/rsa" "fmt" "io/ioutil" "net" @@ -30,7 +29,8 @@ import ( const ( defaultPort = 22 - defaultUser = "core" + defaultUser = "nest" + rsaKeySize = 2048 ) // DefaultSSHDir is a process-global path that can be set, and @@ -45,7 +45,7 @@ type Dialer interface { // SSHAgent can manage keys, updates cloud config, and loves ponies. // The embedded dialer is used for establishing new SSH connections. type SSHAgent struct { - agent.Agent + agent.ExtendedAgent Dialer User string Socket string @@ -57,14 +57,14 @@ type SSHAgent struct { // NewSSHAgent constructs a new SSHAgent using dialer to create ssh // connections. 
func NewSSHAgent(dialer Dialer) (*SSHAgent, error) { - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + key, err := rsa.GenerateKey(rand.Reader, rsaKeySize) if err != nil { return nil, err } addedkey := agent.AddedKey{ PrivateKey: key, - Comment: "core@default", + Comment: "nest@default", } keyring := agent.NewKeyring() @@ -99,13 +99,13 @@ func NewSSHAgent(dialer Dialer) (*SSHAgent, error) { } a := &SSHAgent{ - Agent: keyring, - Dialer: dialer, - User: defaultUser, - Socket: sockPath, - sockDir: sockDir, - sockdirOwned: sockdirOwned, - listener: listener, + ExtendedAgent: keyring.(agent.ExtendedAgent), + Dialer: dialer, + User: defaultUser, + Socket: sockPath, + sockDir: sockDir, + sockdirOwned: sockdirOwned, + listener: listener, } go func() { diff --git a/mantle/network/ssh_test.go b/mantle/network/ssh_test.go index eb4a7709..8a5b451a 100644 --- a/mantle/network/ssh_test.go +++ b/mantle/network/ssh_test.go @@ -71,7 +71,7 @@ func TestSSHNewClient(t *testing.T) { cfg := ssh.ServerConfig{ PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) { - if conn.User() == "core" && bytes.Equal(key.Marshal(), keys[0].Marshal()) { + if conn.User() == "nest" && bytes.Equal(key.Marshal(), keys[0].Marshal()) { return nil, nil } return nil, fmt.Errorf("pubkey rejected") diff --git a/mantle/platform/api/aliyun/api.go b/mantle/platform/api/aliyun/api.go index d43ce876..c01e2fd4 100644 --- a/mantle/platform/api/aliyun/api.go +++ b/mantle/platform/api/aliyun/api.go @@ -146,7 +146,12 @@ func (a *API) CopyImage(source_id, dest_name, dest_region, dest_description, kms // queuing would explain some of the delays observed. if wait_for_ready { plog.Infof("waiting for %v in %v to be available before returning", response.ImageId, dest_region) - a.WaitForImageReady(dest_region, response.ImageId) + if err = a.WaitForImageReady(dest_region, response.ImageId); err != nil { + // Waiting failed... 
Let's just log it and move on since this is + // just to be nice. if we do have to use the image right after, + // we'll fail. + plog.Warningf("failed to wait: %v", err) + } } return response.ImageId, nil } diff --git a/mantle/platform/api/aws/ami.go b/mantle/platform/api/aws/ami.go deleted file mode 100644 index 268cb99d..00000000 --- a/mantle/platform/api/aws/ami.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aws - -import ( - "encoding/json" - "fmt" - "net/http" - "sync" -) - -// relaseAMIs matches the structure of the AMIs listed in our -// coreos_production_ami_all.json release file -type releaseAMIs struct { - AMIS []struct { - Name string `json:"name"` - HVM string `json:"hvm"` - } `json:"amis"` -} - -var amiCache struct { - alphaOnce sync.Once - alphaAMIs *releaseAMIs - - betaOnce sync.Once - betaAMIs *releaseAMIs - - stableOnce sync.Once - stableAMIs *releaseAMIs -} - -// resolveAMI is used to minimize network requests while allowing resolution of -// release channels to specific AMI ids. -// If any issue occurs attempting to resolve a given AMI, e.g. a network error, -// this method panics. 
-func resolveAMI(ami string, region string) string { - resolveChannel := func(channel string) *releaseAMIs { - resp, err := http.DefaultClient.Get(fmt.Sprintf("https://%s.release.core-os.net/amd64-usr/current/coreos_production_ami_all.json", channel)) - if err != nil { - panic(fmt.Errorf("unable to fetch %v AMI json: %v", channel, err)) - } - - var amis releaseAMIs - err = json.NewDecoder(resp.Body).Decode(&amis) - if err != nil { - panic(fmt.Errorf("unable to parse release bucket %v AMI: %v", channel, err)) - } - return &amis - } - - var channelAmis *releaseAMIs - switch ami { - case "alpha": - amiCache.alphaOnce.Do(func() { - amiCache.alphaAMIs = resolveChannel(ami) - }) - channelAmis = amiCache.alphaAMIs - case "beta": - amiCache.betaOnce.Do(func() { - amiCache.betaAMIs = resolveChannel(ami) - }) - channelAmis = amiCache.betaAMIs - case "stable": - amiCache.stableOnce.Do(func() { - amiCache.stableAMIs = resolveChannel(ami) - }) - channelAmis = amiCache.stableAMIs - default: - return ami - } - - for _, ami := range channelAmis.AMIS { - if ami.Name == region { - return ami.HVM - } - } - panic(fmt.Sprintf("could not find %v ami in %+v", ami, amiCache.alphaAMIs.AMIS)) -} diff --git a/mantle/platform/api/aws/api.go b/mantle/platform/api/aws/api.go index 99e6fbe1..3d1e390f 100644 --- a/mantle/platform/api/aws/api.go +++ b/mantle/platform/api/aws/api.go @@ -87,8 +87,6 @@ func New(opts *Options) (*API, error) { return nil, err } - opts.AMI = resolveAMI(opts.AMI, opts.Region) - api := &API{ session: sess, ec2: ec2.New(sess), @@ -115,15 +113,19 @@ func (a *API) PreflightCheck() error { return err } -func tagSpecCreatedByMantle(resourceType string) []*ec2.TagSpecification { +func tagSpecCreatedByMantle(name, resourceType string) []*ec2.TagSpecification { return []*ec2.TagSpecification{ { ResourceType: aws.String(resourceType), Tags: []*ec2.Tag{ - &ec2.Tag{ + { Key: aws.String("CreatedBy"), Value: aws.String("mantle"), }, + { + Key: aws.String("Name"), + Value: 
aws.String(name), + }, }, }, } diff --git a/mantle/platform/api/aws/ec2.go b/mantle/platform/api/aws/ec2.go index 8bacd5ec..234c5753 100644 --- a/mantle/platform/api/aws/ec2.go +++ b/mantle/platform/api/aws/ec2.go @@ -82,7 +82,7 @@ func (a *API) DeleteKey(name string) error { } // CreateInstances creates EC2 instances with a given name tag, optional ssh key name, user data. The image ID, instance type, and security group set in the API will be used. CreateInstances will block until all instances are running and have an IP address. -func (a *API) CreateInstances(name, keyname, userdata string, count uint64, minDiskSize int64) ([]*ec2.Instance, error) { +func (a *API) CreateInstances(name, keyname, userdata string, count uint64, minDiskSize int64, useInstanceProfile bool) ([]*ec2.Instance, error) { cnt := int64(count) var ud *string @@ -91,9 +91,11 @@ func (a *API) CreateInstances(name, keyname, userdata string, count uint64, minD ud = &tud } - err := a.ensureInstanceProfile(a.opts.IAMInstanceProfile) - if err != nil { - return nil, fmt.Errorf("error verifying IAM instance profile: %v", err) + if useInstanceProfile { + err := a.ensureInstanceProfile(a.opts.IAMInstanceProfile) + if err != nil { + return nil, fmt.Errorf("error verifying IAM instance profile: %v", err) + } } sgId, err := a.getSecurityGroupID(a.opts.SecurityGroup) @@ -106,75 +108,97 @@ func (a *API) CreateInstances(name, keyname, userdata string, count uint64, minD return nil, fmt.Errorf("error resolving vpc: %v", err) } - zone, err := a.GetZoneForInstanceType(a.opts.InstanceType) + zones, err := a.GetZonesForInstanceType(a.opts.InstanceType) if err != nil { - return nil, fmt.Errorf("error finding zone for instance type %v", a.opts.InstanceType) + // Find all available zones that offer the given instance type + return nil, fmt.Errorf("error finding zones for instance type %v", a.opts.InstanceType) } - subnetId, err := a.getSubnetID(vpcId, zone) - if err != nil { - return nil, fmt.Errorf("error resolving 
subnet: %v", err) - } + var reservations *ec2.Reservation - key := &keyname - if keyname == "" { - key = nil - } + // Iterate over other possible zones if capacity for an instance + // type is exhausted + for zoneKey, zone := range zones { + subnetId, err := a.getSubnetID(vpcId, zone) + if err != nil { + return nil, fmt.Errorf("error resolving subnet: %v", err) + } - var rootBlockDev []*ec2.BlockDeviceMapping - if minDiskSize > 0 { - rootBlockDev = append(rootBlockDev, &ec2.BlockDeviceMapping{ - DeviceName: aws.String("/dev/xvda"), - Ebs: &ec2.EbsBlockDevice{ - VolumeSize: &minDiskSize, - }, - }) - } - inst := ec2.RunInstancesInput{ - ImageId: &a.opts.AMI, - MinCount: &cnt, - MaxCount: &cnt, - KeyName: key, - InstanceType: &a.opts.InstanceType, - SecurityGroupIds: []*string{&sgId}, - SubnetId: &subnetId, - UserData: ud, - IamInstanceProfile: &ec2.IamInstanceProfileSpecification{ - Name: &a.opts.IAMInstanceProfile, - }, - BlockDeviceMappings: rootBlockDev, - TagSpecifications: []*ec2.TagSpecification{ - &ec2.TagSpecification{ - ResourceType: aws.String(ec2.ResourceTypeInstance), - Tags: []*ec2.Tag{ - &ec2.Tag{ - Key: aws.String("Name"), - Value: aws.String(name), - }, - &ec2.Tag{ - Key: aws.String("CreatedBy"), - Value: aws.String("mantle"), + key := &keyname + if keyname == "" { + key = nil + } + + var rootBlockDev []*ec2.BlockDeviceMapping + if minDiskSize > 0 { + rootBlockDev = append(rootBlockDev, &ec2.BlockDeviceMapping{ + DeviceName: aws.String("/dev/xvda"), + Ebs: &ec2.EbsBlockDevice{ + VolumeSize: &minDiskSize, + }, + }) + } + inst := ec2.RunInstancesInput{ + ImageId: &a.opts.AMI, + MinCount: &cnt, + MaxCount: &cnt, + KeyName: key, + InstanceType: &a.opts.InstanceType, + SecurityGroupIds: []*string{&sgId}, + SubnetId: &subnetId, + UserData: ud, + BlockDeviceMappings: rootBlockDev, + TagSpecifications: []*ec2.TagSpecification{ + { + ResourceType: aws.String(ec2.ResourceTypeInstance), + Tags: []*ec2.Tag{ + { + Key: aws.String("Name"), + Value: 
aws.String(name), + }, + { + Key: aws.String("CreatedBy"), + Value: aws.String("mantle"), + }, }, }, }, - }, - } + } + if useInstanceProfile { + inst.IamInstanceProfile = &ec2.IamInstanceProfileSpecification{ + Name: &a.opts.IAMInstanceProfile, + } + } - var reservations *ec2.Reservation - err = util.RetryConditional(5, 5*time.Second, func(err error) bool { - // due to AWS' eventual consistency despite ensuring that the IAM Instance - // Profile has been created it may not be available to ec2 yet. - if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "iamInstanceProfile.name")) { - return true + err = util.RetryConditional(5, 5*time.Second, func(err error) bool { + // due to AWS' eventual consistency despite ensuring that the IAM Instance + // Profile has been created it may not be available to ec2 yet. + if awsErr, ok := err.(awserr.Error); ok && (awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "iamInstanceProfile.name")) { + return true + } + return false + }, func() error { + var ierr error + reservations, ierr = a.ec2.RunInstances(&inst) + return ierr + }) + if err == nil { + // Successfully started our instance in the requested zone. Break out of the loop + break + } + if err != nil { + // Handle InsufficientInstanceCapacity error specifically + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InsufficientInstanceCapacity" { + // If we iterate over all possible zones and none of them have sufficient instance(s) + // available we will return the InsufficientInstanceCapacity error + if zoneKey == len(zones)-1 { + return nil, fmt.Errorf("all available zones tried: %v", err) + } + plog.Warningf("Insufficient instances available in zone %v. 
Trying the next zone\n", zone) + continue + } + return nil, fmt.Errorf("error running instances: %v", err) } - return false - }, func() error { - var ierr error - reservations, ierr = a.ec2.RunInstances(&inst) - return ierr - }) - if err != nil { - return nil, fmt.Errorf("error running instances: %v", err) } ids := make([]string, len(reservations.Instances)) @@ -228,7 +252,7 @@ func (a *API) gcEC2(gracePeriod time.Duration) error { instances, err := a.ec2.DescribeInstances(&ec2.DescribeInstancesInput{ Filters: []*ec2.Filter{ - &ec2.Filter{ + { Name: aws.String("tag:CreatedBy"), Values: aws.StringSlice([]string{"mantle"}), }, @@ -325,15 +349,15 @@ func (a *API) GetConsoleOutput(instanceID string) (string, error) { return string(decoded), nil } -// GetZoneForInstanceType returns an availability zone that offers the -// given instance type. This is useful because not all availabitliy zones +// GetZonesForInstanceType returns all available zones that offer the +// given instance type. This is useful because not all availability zones // offer all instances types. 
-func (a *API) GetZoneForInstanceType(instanceType string) (string, error) { +func (a *API) GetZonesForInstanceType(instanceType string) ([]string, error) { input := ec2.DescribeInstanceTypeOfferingsInput{ LocationType: aws.String(ec2.LocationTypeAvailabilityZone), Filters: []*ec2.Filter{ - &ec2.Filter{ + { Name: aws.String("instance-type"), Values: []*string{aws.String(instanceType)}, }, @@ -341,10 +365,15 @@ func (a *API) GetZoneForInstanceType(instanceType string) (string, error) { } output, err := a.ec2.DescribeInstanceTypeOfferings(&input) if err != nil { - return "", fmt.Errorf("error describing instance offerings: %v", err) + return nil, fmt.Errorf("error describing instance offerings: %v", err) } if len(output.InstanceTypeOfferings) == 0 { - return "", fmt.Errorf("no availability zones found for this instance type %v:", instanceType) + return nil, fmt.Errorf("no availability zones found for this instance type %v:", instanceType) + } + + var zones []string + for _, v := range output.InstanceTypeOfferings { + zones = append(zones, *v.Location) } - return *output.InstanceTypeOfferings[0].Location, nil + return zones, nil } diff --git a/mantle/platform/api/aws/images.go b/mantle/platform/api/aws/images.go index 37fe3513..0bc4bed2 100644 --- a/mantle/platform/api/aws/images.go +++ b/mantle/platform/api/aws/images.go @@ -79,11 +79,11 @@ func (a *API) FindSnapshot(imageName string) (*Snapshot, error) { // Look for an existing snapshot with this image name. 
snapshotRes, err := a.ec2.DescribeSnapshots(&ec2.DescribeSnapshotsInput{ Filters: []*ec2.Filter{ - &ec2.Filter{ + { Name: aws.String("status"), Values: aws.StringSlice([]string{"completed"}), }, - &ec2.Filter{ + { Name: aws.String("tag:Name"), Values: aws.StringSlice([]string{imageName}), }, @@ -350,7 +350,7 @@ func (a *API) CreateHVMImage(snapshotID string, diskSizeGiB uint, name string, d VirtualizationType: aws.String("hvm"), RootDeviceName: aws.String("/dev/xvda"), BlockDeviceMappings: []*ec2.BlockDeviceMapping{ - &ec2.BlockDeviceMapping{ + { DeviceName: aws.String("/dev/xvda"), Ebs: &ec2.EbsBlockDevice{ SnapshotId: aws.String(snapshotID), @@ -359,7 +359,7 @@ func (a *API) CreateHVMImage(snapshotID string, diskSizeGiB uint, name string, d VolumeType: aws.String("gp2"), }, }, - &ec2.BlockDeviceMapping{ + { DeviceName: aws.String("/dev/xvdb"), VirtualName: aws.String("ephemeral0"), }, @@ -701,7 +701,7 @@ func (a *API) copyImageIn(sourceRegion, sourceImageID, name, description string, func (a *API) FindImage(name string) (string, error) { describeRes, err := a.ec2.DescribeImages(&ec2.DescribeImagesInput{ Filters: []*ec2.Filter{ - &ec2.Filter{ + { Name: aws.String("name"), Values: aws.StringSlice([]string{name}), }, @@ -747,7 +747,7 @@ func (a *API) PublishImage(imageID string) error { SnapshotId: &snapshotID, CreateVolumePermission: &ec2.CreateVolumePermissionModifications{ Add: []*ec2.CreateVolumePermission{ - &ec2.CreateVolumePermission{ + { Group: aws.String("all"), }, }, @@ -763,7 +763,7 @@ func (a *API) PublishImage(imageID string) error { ImageId: aws.String(imageID), LaunchPermission: &ec2.LaunchPermissionModifications{ Add: []*ec2.LaunchPermission{ - &ec2.LaunchPermission{ + { Group: aws.String("all"), }, }, diff --git a/mantle/platform/api/aws/network.go b/mantle/platform/api/aws/network.go index 0604c3db..c53df3e2 100644 --- a/mantle/platform/api/aws/network.go +++ b/mantle/platform/api/aws/network.go @@ -50,14 +50,15 @@ func (a *API) 
getSecurityGroupID(name string) (string, error) { // createSecurityGroup creates a security group with tcp/22 access allowed from the // internet. func (a *API) createSecurityGroup(name string) (string, error) { - vpcId, err := a.createVPC() + vpcId, err := a.createVPC(name) if err != nil { return "", err } sg, err := a.ec2.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{ - GroupName: aws.String(name), - Description: aws.String("mantle security group for testing"), - VpcId: aws.String(vpcId), + GroupName: aws.String(name), + Description: aws.String("mantle security group for testing"), + VpcId: aws.String(vpcId), + TagSpecifications: tagSpecCreatedByMantle(name, ec2.ResourceTypeSecurityGroup), }) if err != nil { return "", err @@ -136,10 +137,11 @@ func (a *API) createSecurityGroup(name string) (string, error) { } // createVPC creates a VPC with an IPV4 CidrBlock of 172.31.0.0/16 -func (a *API) createVPC() (string, error) { +func (a *API) createVPC(name string) (string, error) { vpc, err := a.ec2.CreateVpc(&ec2.CreateVpcInput{ - CidrBlock: aws.String("172.31.0.0/16"), - TagSpecifications: tagSpecCreatedByMantle(ec2.ResourceTypeVpc), + AmazonProvidedIpv6CidrBlock: aws.Bool(true), + CidrBlock: aws.String("172.31.0.0/16"), + TagSpecifications: tagSpecCreatedByMantle(name, ec2.ResourceTypeVpc), }) if err != nil { return "", fmt.Errorf("creating VPC: %v", err) @@ -167,12 +169,12 @@ func (a *API) createVPC() (string, error) { return "", fmt.Errorf("enabling DNS Support VPC attribute: %v", err) } - routeTable, err := a.createRouteTable(*vpc.Vpc.VpcId) + routeTable, err := a.createRouteTable(name, *vpc.Vpc.VpcId) if err != nil { return "", fmt.Errorf("creating RouteTable: %v", err) } - err = a.createSubnets(*vpc.Vpc.VpcId, routeTable) + err = a.createSubnets(name, *vpc.Vpc.VpcId, routeTable) if err != nil { return "", fmt.Errorf("creating subnets: %v", err) } @@ -180,12 +182,13 @@ func (a *API) createVPC() (string, error) { return *vpc.Vpc.VpcId, nil } -// 
createRouteTable creates a RouteTable with a local target for destination -// 172.31.0.0/16 as well as an InternetGateway for destination 0.0.0.0/0 -func (a *API) createRouteTable(vpcId string) (string, error) { +// createRouteTable creates a RouteTable with local targets for subnets for +// destination CIDRs in the VPC as well as an InternetGateway all IPv4/IPv6 +// destinations. +func (a *API) createRouteTable(name, vpcId string) (string, error) { rt, err := a.ec2.CreateRouteTable(&ec2.CreateRouteTableInput{ VpcId: &vpcId, - TagSpecifications: tagSpecCreatedByMantle(ec2.ResourceTypeRouteTable), + TagSpecifications: tagSpecCreatedByMantle(name, ec2.ResourceTypeRouteTable), }) if err != nil { return "", err @@ -194,7 +197,7 @@ func (a *API) createRouteTable(vpcId string) (string, error) { return "", fmt.Errorf("route table was nil after creation") } - igw, err := a.createInternetGateway(vpcId) + igw, err := a.createInternetGateway(name, vpcId) if err != nil { return "", fmt.Errorf("creating internet gateway: %v", err) } @@ -208,13 +211,22 @@ func (a *API) createRouteTable(vpcId string) (string, error) { return "", fmt.Errorf("creating remote route: %v", err) } + _, err = a.ec2.CreateRoute(&ec2.CreateRouteInput{ + DestinationIpv6CidrBlock: aws.String("::/0"), + GatewayId: aws.String(igw), + RouteTableId: rt.RouteTable.RouteTableId, + }) + if err != nil { + return "", fmt.Errorf("creating remote route: %v", err) + } + return *rt.RouteTable.RouteTableId, nil } // creates an InternetGateway and attaches it to the given VPC -func (a *API) createInternetGateway(vpcId string) (string, error) { +func (a *API) createInternetGateway(name, vpcId string) (string, error) { igw, err := a.ec2.CreateInternetGateway(&ec2.CreateInternetGatewayInput{ - TagSpecifications: tagSpecCreatedByMantle(ec2.ResourceTypeInternetGateway), + TagSpecifications: tagSpecCreatedByMantle(name, ec2.ResourceTypeInternetGateway), }) if err != nil { return "", err @@ -234,12 +246,28 @@ func (a *API) 
createInternetGateway(vpcId string) (string, error) { // createSubnets creates a subnet in each availability zone for the region // that is associated with the given VPC associated with the given RouteTable -func (a *API) createSubnets(vpcId, routeTableId string) error { +func (a *API) createSubnets(name, vpcId, routeTableId string) error { azs, err := a.ec2.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{}) if err != nil { return fmt.Errorf("retrieving availability zones: %v", err) } + // We need to determine the block of IPv6 addresses that were assigned + // to us. Let's get that information from the VPC + request, err := a.ec2.DescribeVpcs(&ec2.DescribeVpcsInput{ + VpcIds: []*string{&vpcId}, + }) + if err != nil { + return fmt.Errorf("retrieving info about vpc: %v", err) + } + vpcIpv6CidrBlock := *request.Vpcs[0].Ipv6CidrBlockAssociationSet[0].Ipv6CidrBlock + + // We were given a /56. When we create subnets they want a /64, which + // means there are 256 (8 bits) subnets we can create. The loop below only + // runs 16 times so we only need to pull off the last digit of the hex based + // ipv6 address (which will be a 0). So we pull off '0::/56' here. + ipv6CidrBlockPart := vpcIpv6CidrBlock[:len(vpcIpv6CidrBlock)-6] + for i, az := range azs.AvailabilityZones { // 16 is the maximum amount of zones possible when giving them a /20 // CIDR range inside of a /16 network. 
@@ -253,11 +281,13 @@ func (a *API) createSubnets(vpcId, routeTableId string) error { name := *az.ZoneName sub, err := a.ec2.CreateSubnet(&ec2.CreateSubnetInput{ - AvailabilityZone: aws.String(name), - VpcId: &vpcId, + AvailabilityZone: aws.String(name), + VpcId: &vpcId, + TagSpecifications: tagSpecCreatedByMantle(name, ec2.ResourceTypeSubnet), // Increment the CIDR block by 16 every time - CidrBlock: aws.String(fmt.Sprintf("172.31.%d.0/20", i*16)), - TagSpecifications: tagSpecCreatedByMantle(ec2.ResourceTypeSubnet), + CidrBlock: aws.String(fmt.Sprintf("172.31.%d.0/20", i*16)), + // Increment the Ipv6CidrBlock by 1 every time (new /64) + Ipv6CidrBlock: aws.String(fmt.Sprintf("%s%x::/64", ipv6CidrBlockPart, i)), }) if err != nil { // Some availability zones get returned but cannot have subnets @@ -279,6 +309,15 @@ func (a *API) createSubnets(vpcId, routeTableId string) error { if err != nil { return err } + _, err = a.ec2.ModifySubnetAttribute(&ec2.ModifySubnetAttributeInput{ + SubnetId: sub.Subnet.SubnetId, + AssignIpv6AddressOnCreation: &ec2.AttributeBooleanValue{ + Value: aws.Bool(true), + }, + }) + if err != nil { + return err + } _, err = a.ec2.AssociateRouteTable(&ec2.AssociateRouteTableInput{ RouteTableId: &routeTableId, diff --git a/mantle/platform/api/azure/api.go b/mantle/platform/api/azure/api.go index 3d00493b..2dd84c18 100644 --- a/mantle/platform/api/azure/api.go +++ b/mantle/platform/api/azure/api.go @@ -27,7 +27,6 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/resources/resources" armStorage "github.com/Azure/azure-sdk-for-go/arm/storage" - "github.com/Azure/azure-sdk-for-go/management" "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/coreos/pkg/capnslog" @@ -40,7 +39,6 @@ var ( ) type API struct { - client management.Client rgClient resources.GroupsClient imgClient compute.ImagesClient compClient compute.VirtualMachinesClient @@ -55,13 +53,6 
@@ type API struct { // New creates a new Azure client. If no publish settings file is provided or // can't be parsed, an anonymous client is created. func New(opts *Options) (*API, error) { - conf := management.DefaultConfig() - conf.APIVersion = "2015-04-01" - - if opts.ManagementURL != "" { - conf.ManagementURL = opts.ManagementURL - } - if opts.StorageEndpointSuffix == "" { opts.StorageEndpointSuffix = storage.DefaultBaseURL } @@ -96,31 +87,16 @@ func New(opts *Options) (*API, error) { opts.SubscriptionName = subOpts.SubscriptionName } - if opts.ManagementURL == "" { - opts.ManagementURL = subOpts.ManagementURL - } - - if opts.ManagementCertificate == nil { - opts.ManagementCertificate = subOpts.ManagementCertificate - } - if opts.StorageEndpointSuffix == "" { opts.StorageEndpointSuffix = subOpts.StorageEndpointSuffix } - client, err := management.NewClientFromConfig(opts.SubscriptionID, opts.ManagementCertificate, conf) - if err != nil { - return nil, fmt.Errorf("failed to create azure client: %v", err) - } - api := &API{ - client: client, - opts: opts, + opts: opts, } - err = api.resolveImage() - if err != nil { - return nil, fmt.Errorf("failed to resolve image: %v", err) + if opts.Sku != "" && opts.DiskURI == "" && opts.Version == "" { + return nil, fmt.Errorf("SKU set to %q but Disk URI and version not set; can't resolve", opts.Sku) } return api, nil @@ -182,12 +158,25 @@ func (a *API) GC(gracePeriod time.Duration) error { for _, l := range *listGroups.Value { if strings.HasPrefix(*l.Name, "kola-cluster") { - createdAt := *(*l.Tags)["createdAt"] - timeCreated, err := time.Parse(time.RFC3339, createdAt) - if err != nil { - return fmt.Errorf("error parsing time: %v", err) + terminate := false + if l.Tags == nil || (*l.Tags)["createdAt"] == nil { + // If the group name starts with kola-cluster and has + // no tags OR no createdAt then it failed to properly + // get created and we should clean it up. 
+ // https://github.com/coreos/coreos-assembler/issues/3057 + terminate = true + } else { + createdAt := *(*l.Tags)["createdAt"] + timeCreated, err := time.Parse(time.RFC3339, createdAt) + if err != nil { + return fmt.Errorf("error parsing time: %v", err) + } + if !timeCreated.After(durationAgo) { + // If the group is older than specified time then gc + terminate = true + } } - if !timeCreated.After(durationAgo) { + if terminate { if err = a.TerminateResourceGroup(*l.Name); err != nil { return err } diff --git a/mantle/platform/api/azure/image.go b/mantle/platform/api/azure/image.go index 5f040904..056642a5 100644 --- a/mantle/platform/api/azure/image.go +++ b/mantle/platform/api/azure/image.go @@ -15,62 +15,9 @@ package azure import ( - "bufio" - "encoding/xml" - "fmt" - "net/http" - "strings" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "github.com/Azure/azure-sdk-for-go/management" ) -// OSImage struct for https://msdn.microsoft.com/en-us/library/azure/jj157192.aspx call. -// -// XXX: the field ordering is important! -type OSImage struct { - XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure OSImage"` - Category string `xml:",omitempty"` // Public || Private || MSDN - Label string `xml:",omitempty"` // Specifies an identifier for the image. - MediaLink string `xml:",omitempty"` // Specifies the location of the vhd file for the image. The storage account where the vhd is located must be associated with the specified subscription. - Name string // Specifies the name of the operating system image. This is the name that is used when creating one or more virtual machines using the image. - OS string // Linux || Windows - Eula string `xml:",omitempty"` // Specifies the End User License Agreement that is associated with the image. The value for this element is a string, but it is recommended that the value be a URL that points to a EULA. - Description string `xml:",omitempty"` // Specifies the description of the image. 
- ImageFamily string `xml:",omitempty"` // Specifies a value that can be used to group images. - PublishedDate string `xml:",omitempty"` // Specifies the date when the image was added to the image repository. - ShowInGui bool // Specifies whether the image should appear in the image gallery. - PrivacyURI string `xml:"PrivacyUri,omitempty"` // Specifies the URI that points to a document that contains the privacy policy related to the image. - IconURI string `xml:"IconUri,omitempty"` // Specifies the Uri to the icon that is displayed for the image in the Management Portal. - RecommendedVMSize string `xml:",omitempty"` // Specifies the size to use for the virtual machine that is created from the image. - SmallIconURI string `xml:"SmallIconUri,omitempty"` // Specifies the URI to the small icon that is displayed when the image is presented in the Microsoft Azure Management Portal. - Language string `xml:",omitempty"` // Specifies the language of the image. - - LogicalSizeInGB float64 `xml:",omitempty"` //Specifies the size, in GB, of the image. - Location string `xml:",omitempty"` // The geo-location in which this media is located. The Location value is derived from storage account that contains the blob in which the media is located. If the storage account belongs to an affinity group the value is NULL. - AffinityGroup string `xml:",omitempty"` // Specifies the affinity in which the media is located. The AffinityGroup value is derived from storage account that contains the blob in which the media is located. If the storage account does not belong to an affinity group the value is NULL and the element is not displayed in the response. This value is NULL for platform images. - IsPremium string `xml:",omitempty"` // Indicates whether the image contains software or associated services that will incur charges above the core price for the virtual machine. For additional details, see the PricingDetailLink element. 
- PublisherName string `xml:",omitempty"` // The name of the publisher of the image. All user images have a publisher name of User. - PricingDetailLink string `xml:",omitempty"` // Specifies a URL for an image with IsPremium set to true, which contains the pricing details for a virtual machine that is created from the image. -} - -var azureImageShareURL = "services/images/%s/share?permission=%s" - -func (a *API) ShareImage(image, permission string) error { - url := fmt.Sprintf(azureImageShareURL, image, permission) - op, err := a.client.SendAzurePutRequest(url, "", nil) - if err != nil { - return err - } - - return a.client.WaitForOperation(op, nil) -} - -func IsConflictError(err error) bool { - azerr, ok := err.(management.AzureError) - return ok && azerr.Code == "ConflictError" -} - func (a *API) CreateImage(name, resourceGroup, blobURI string) (compute.Image, error) { _, err := a.imgClient.CreateOrUpdate(resourceGroup, name, compute.Image{ Name: &name, @@ -92,37 +39,6 @@ func (a *API) CreateImage(name, resourceGroup, blobURI string) (compute.Image, e return a.imgClient.Get(resourceGroup, name, "") } -// resolveImage is used to ensure that either a Version or DiskURI -// are provided present for a run. If neither is given via arguments -// it attempts to parse the Version from the version.txt in the Sku's -// release bucket. 
-func (a *API) resolveImage() error { - // immediately return if the version has been set or if the channel - // is not set via the Sku (this happens in ore) - if a.opts.DiskURI != "" || a.opts.Version != "" || a.opts.Sku == "" { - return nil - } - - resp, err := http.DefaultClient.Get(fmt.Sprintf("https://%s.release.core-os.net/amd64-usr/current/version.txt", a.opts.Sku)) - if err != nil { - return fmt.Errorf("unable to fetch release bucket %v version: %v", a.opts.Sku, err) - } - - scanner := bufio.NewScanner(resp.Body) - for scanner.Scan() { - line := strings.SplitN(scanner.Text(), "=", 2) - if len(line) != 2 { - continue - } - if line[0] == "COREOS_VERSION" { - a.opts.Version = line[1] - return nil - } - } - - return fmt.Errorf("couldn't find COREOS_VERSION in version.txt") -} - // DeleteImage removes Azure image func (a *API) DeleteImage(name, resourceGroup string) error { _, err := a.imgClient.Delete(resourceGroup, name, nil) diff --git a/mantle/platform/api/azure/instance.go b/mantle/platform/api/azure/instance.go index 22b02128..431b9577 100644 --- a/mantle/platform/api/azure/instance.go +++ b/mantle/platform/api/azure/instance.go @@ -15,13 +15,14 @@ package azure import ( + "crypto/rand" "encoding/base64" "fmt" "io" "io/ioutil" - "math/rand" + "math" + "math/big" "regexp" - "strconv" "time" "github.com/Azure/azure-sdk-for-go/arm/compute" @@ -55,10 +56,13 @@ func (a *API) getVMParameters(name, userdata, sshkey, storageAccountURI string, // from the following: Contains an uppercase character, Contains a // lowercase character, Contains a numeric digit, Contains a special // character) Control characters are not allowed" - password := fmt.Sprintf("%s%s%s", "ABC&", strconv.Itoa(rand.Int()), "xyz") - + n, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + panic(fmt.Sprintf("calling crypto/rand.Int() failed and that shouldn't happen: %v", err)) + } + password := fmt.Sprintf("%s%s%s", "ABC&", n, "xyz") osProfile := compute.OSProfile{ - 
AdminUsername: util.StrToPtr("core"), // unused + AdminUsername: util.StrToPtr("nest"), // unused AdminPassword: util.StrToPtr(password), // unused ComputerName: &name, } @@ -67,7 +71,7 @@ func (a *API) getVMParameters(name, userdata, sshkey, storageAccountURI string, SSH: &compute.SSHConfiguration{ PublicKeys: &[]compute.SSHPublicKey{ { - Path: util.StrToPtr("/home/core/.ssh/authorized_keys"), + Path: util.StrToPtr("/home/nest/.ssh/authorized_keys"), KeyData: &sshkey, }, }, @@ -152,9 +156,13 @@ func (a *API) CreateInstance(name, userdata, sshkey, resourceGroup, storageAccou vmParams := a.getVMParameters(name, userdata, sshkey, fmt.Sprintf("https://%s.blob.core.windows.net/", storageAccount), ip, nic) - _, err = a.compClient.CreateOrUpdate(resourceGroup, name, vmParams, nil) + cancel := make(chan struct{}) + time.AfterFunc(8*time.Minute, func() { + close(cancel) + }) + _, err = a.compClient.CreateOrUpdate(resourceGroup, name, vmParams, cancel) if err != nil { - return nil, err + return nil, fmt.Errorf("creating instance failed: %w", err) } err = util.WaitUntilReady(5*time.Minute, 10*time.Second, func() (bool, error) { @@ -205,7 +213,7 @@ func (a *API) TerminateInstance(name, resourceGroup string) error { } func (a *API) GetConsoleOutput(name, resourceGroup, storageAccount string) ([]byte, error) { - kr, err := a.GetStorageServiceKeysARM(storageAccount, resourceGroup) + kr, err := a.GetStorageServiceKeys(storageAccount, resourceGroup) if err != nil { return nil, fmt.Errorf("retrieving storage service keys: %v", err) } diff --git a/mantle/platform/api/azure/options.go b/mantle/platform/api/azure/options.go index 30acd998..c245161d 100644 --- a/mantle/platform/api/azure/options.go +++ b/mantle/platform/api/azure/options.go @@ -36,10 +36,6 @@ type Options struct { SubscriptionName string SubscriptionID string - // Azure API endpoint. If unset, the Azure SDK default will be used. 
- ManagementURL string - ManagementCertificate []byte - // Azure Storage API endpoint suffix. If unset, the Azure SDK default will be used. StorageEndpointSuffix string } diff --git a/mantle/platform/api/azure/replication.go b/mantle/platform/api/azure/replication.go deleted file mode 100644 index 237a8f07..00000000 --- a/mantle/platform/api/azure/replication.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "encoding/xml" - "fmt" - - "github.com/Azure/azure-sdk-for-go/management/location" -) - -const ( - computeService = "Compute" -) - -var ( - azureImageReplicateURL = "services/images/%s/replicate" - azureImageUnreplicateURL = "services/images/%s/unreplicate" -) - -type ReplicationInput struct { - XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ReplicationInput"` - TargetLocations []string `xml:"TargetLocations>Region"` - Offer string `xml:"ComputeImageAttributes>Offer"` - Sku string `xml:"ComputeImageAttributes>Sku"` - Version string `xml:"ComputeImageAttributes>Version"` -} - -// Locations returns a slice of Azure Locations which offer the Compute -// service, useful for replicating to all Locations. 
-func (a *API) Locations() ([]string, error) { - lc := location.NewClient(a.client) - - llr, err := lc.ListLocations() - if err != nil { - return nil, err - } - - var locations []string - - for _, l := range llr.Locations { - haveCompute := false - for _, svc := range l.AvailableServices { - if svc == computeService { - haveCompute = true - break - } - } - - if haveCompute { - locations = append(locations, l.Name) - } else { - plog.Infof("Skipping location %q without %s service", l.Name, computeService) - } - } - - return locations, nil -} - -func (a *API) ReplicateImage(image, offer, sku, version string, regions ...string) error { - ri := ReplicationInput{ - TargetLocations: regions, - Offer: offer, - Sku: sku, - Version: version, - } - - data, err := xml.Marshal(&ri) - if err != nil { - return err - } - - url := fmt.Sprintf(azureImageReplicateURL, image) - - op, err := a.client.SendAzurePutRequest(url, "", data) - if err != nil { - return err - } - - return a.client.WaitForOperation(op, nil) -} - -func (a *API) UnreplicateImage(image string) error { - url := fmt.Sprintf(azureImageUnreplicateURL, image) - op, err := a.client.SendAzurePutRequest(url, "", []byte{}) - if err != nil { - return err - } - - return a.client.WaitForOperation(op, nil) -} diff --git a/mantle/platform/api/azure/storage.go b/mantle/platform/api/azure/storage.go index 79768527..2e8db1c7 100644 --- a/mantle/platform/api/azure/storage.go +++ b/mantle/platform/api/azure/storage.go @@ -15,76 +15,16 @@ package azure import ( - "encoding/xml" "fmt" - "net/url" - "path" "strings" "github.com/Azure/azure-sdk-for-go/arm/storage" - "github.com/Azure/azure-sdk-for-go/management" - "github.com/Azure/azure-sdk-for-go/management/storageservice" ) -var ( - azureImageURL = "services/images" -) - -func (a *API) GetStorageServiceKeys(account string) (storageservice.GetStorageServiceKeysResponse, error) { - return storageservice.NewClient(a.client).GetStorageServiceKeys(account) -} - -func (a *API) 
GetStorageServiceKeysARM(account, resourceGroup string) (storage.AccountListKeysResult, error) { +func (a *API) GetStorageServiceKeys(account, resourceGroup string) (storage.AccountListKeysResult, error) { return a.accClient.ListKeys(resourceGroup, account) } -// https://msdn.microsoft.com/en-us/library/azure/jj157192.aspx -func (a *API) AddOSImage(md *OSImage) error { - data, err := xml.Marshal(md) - if err != nil { - return err - } - - op, err := a.client.SendAzurePostRequest(azureImageURL, data) - if err != nil { - return err - } - - return a.client.WaitForOperation(op, nil) -} - -func (a *API) OSImageExists(name string) (bool, error) { - url := fmt.Sprintf("%s/%s", azureImageURL, name) - response, err := a.client.SendAzureGetRequest(url) - if err != nil { - if management.IsResourceNotFoundError(err) { - return false, nil - } - - return false, err - } - - var image OSImage - - if err := xml.Unmarshal(response, &image); err != nil { - return false, err - } - - if image.Name == name { - return true, nil - } - - return false, nil -} - -func (a *API) UrlOfBlob(account, container, blob string) *url.URL { - return &url.URL{ - Scheme: "https", - Host: fmt.Sprintf("%s.blob.%s", account, a.opts.StorageEndpointSuffix), - Path: path.Join(container, blob), - } -} - func (a *API) CreateStorageAccount(resourceGroup string) (string, error) { // Only lower-case letters & numbers allowed in storage account names name := strings.Replace(randomName("kolasa"), "-", "", -1) diff --git a/mantle/platform/api/azure/storage_mit.go b/mantle/platform/api/azure/storage_mit.go index 5a92b379..d58e7a74 100644 --- a/mantle/platform/api/azure/storage_mit.go +++ b/mantle/platform/api/azure/storage_mit.go @@ -43,30 +43,6 @@ const pageBlobPageSize int64 = 2 * 1024 * 1024 type BlobExistsError string -func (a *API) ListStorageContainers(storageaccount, storagekey, prefix string) (storage.ContainerListResponse, error) { - sc, err := storage.NewClient(storageaccount, storagekey, 
a.opts.StorageEndpointSuffix, storage.DefaultAPIVersion, true) - if err != nil { - return storage.ContainerListResponse{}, err - } - - bsc := sc.GetBlobService() - - return bsc.ListContainers(storage.ListContainersParameters{ - Prefix: prefix, - }) -} - -func (a *API) TerminateStorageContainer(storageaccount, storagekey, name string) error { - sc, err := storage.NewClient(storageaccount, storagekey, a.opts.StorageEndpointSuffix, storage.DefaultAPIVersion, true) - if err != nil { - return err - } - - bsc := sc.GetBlobService() - - return bsc.DeleteContainer(name) -} - func (be BlobExistsError) Error() string { return fmt.Sprintf("blob %q already exists", string(be)) } @@ -96,6 +72,27 @@ func (a *API) GetBlob(storageaccount, storagekey, container, name string) (io.Re return bsc.GetBlob(container, name) } +// DeleteBlob deletes the given blob specified by the given storage account, +// container, and blob name. +func (a *API) DeleteBlob(storageaccount, storagekey, container, blob string) error { + sc, err := storage.NewClient(storageaccount, storagekey, a.opts.StorageEndpointSuffix, storage.DefaultAPIVersion, true) + if err != nil { + return err + } + + bsc := sc.GetBlobService() + if _, err = bsc.CreateContainerIfNotExists(container, storage.ContainerAccessTypePrivate); err != nil { + return err + } + + err = bsc.DeleteBlob(container, blob, nil) + if err != nil { + return err + } + + return nil +} + // UploadBlob uploads vhd to the given storage account, container, and blob name. // // It returns BlobExistsError if the blob exists and overwrite is not true. 
diff --git a/mantle/platform/api/gcloud/api.go b/mantle/platform/api/gcloud/api.go index 04744a30..889d634c 100644 --- a/mantle/platform/api/gcloud/api.go +++ b/mantle/platform/api/gcloud/api.go @@ -18,7 +18,6 @@ package gcloud import ( "context" "google.golang.org/api/option" - "io/ioutil" "net/http" "time" @@ -40,6 +39,7 @@ type Options struct { MachineType string DiskType string Network string + ServiceAcct string JSONKeyFile string ServiceAuth bool *platform.Options @@ -67,21 +67,12 @@ func New(opts *Options) (*API, error) { if opts.ServiceAuth { client = auth.GoogleServiceClient() - } else if opts.JSONKeyFile != "" { - b, err := ioutil.ReadFile(opts.JSONKeyFile) + } else { + client, err = auth.GoogleClientFromKeyFile(opts.JSONKeyFile) if err != nil { plog.Fatal(err) + return nil, err } - client, err = auth.GoogleClientFromJSONKey(b) - if err != nil { - plog.Error(err) - } - } else { - client, err = auth.GoogleClient() - } - - if err != nil { - return nil, err } ctx := context.Background() @@ -91,6 +82,14 @@ func New(opts *Options) (*API, error) { return nil, err } + if opts.ServiceAcct == "" { + proj, err := computeService.Projects.Get(opts.Project).Do() + if err != nil { + return nil, err + } + opts.ServiceAcct = proj.DefaultServiceAccount + } + api := &API{ client: client, compute: computeService, diff --git a/mantle/platform/api/gcloud/compute.go b/mantle/platform/api/gcloud/compute.go index a8caf458..f2f1d680 100644 --- a/mantle/platform/api/gcloud/compute.go +++ b/mantle/platform/api/gcloud/compute.go @@ -34,10 +34,10 @@ func (a *API) vmname() string { } // Taken from: https://github.com/golang/build/blob/master/buildlet/gce.go -func (a *API) mkinstance(userdata, name string, keys []*agent.Key) *compute.Instance { +func (a *API) mkinstance(userdata, name string, keys []*agent.Key, useServiceAcct bool) *compute.Instance { mantle := "mantle" metadataItems := []*compute.MetadataItems{ - &compute.MetadataItems{ + { // this should be done with a label instead, 
but // our old vendored Go binding doesn't support those Key: "created-by", @@ -84,9 +84,9 @@ func (a *API) mkinstance(userdata, name string, keys []*agent.Key) *compute.Inst }, }, NetworkInterfaces: []*compute.NetworkInterface{ - &compute.NetworkInterface{ + { AccessConfigs: []*compute.AccessConfig{ - &compute.AccessConfig{ + { Type: "ONE_TO_ONE_NAT", Name: "External NAT", }, @@ -95,6 +95,15 @@ func (a *API) mkinstance(userdata, name string, keys []*agent.Key) *compute.Inst }, }, } + if useServiceAcct { + // allow the instance to perform authenticated GCS fetches + instance.ServiceAccounts = []*compute.ServiceAccount{ + { + Email: a.options.ServiceAcct, + Scopes: []string{"https://www.googleapis.com/auth/devstorage.read_only"}, + }, + } + } // add cloud config if userdata != "" { instance.Metadata.Items = append(instance.Metadata.Items, &compute.MetadataItems{ @@ -108,9 +117,9 @@ func (a *API) mkinstance(userdata, name string, keys []*agent.Key) *compute.Inst } // CreateInstance creates a Google Compute Engine instance. 
-func (a *API) CreateInstance(userdata string, keys []*agent.Key) (*compute.Instance, error) { +func (a *API) CreateInstance(userdata string, keys []*agent.Key, useServiceAcct bool) (*compute.Instance, error) { name := a.vmname() - inst := a.mkinstance(userdata, name, keys) + inst := a.mkinstance(userdata, name, keys, useServiceAcct) plog.Debugf("Creating instance %q", name) diff --git a/mantle/platform/api/gcloud/pending.go b/mantle/platform/api/gcloud/pending.go index e34c7ae2..f8b697fd 100644 --- a/mantle/platform/api/gcloud/pending.go +++ b/mantle/platform/api/gcloud/pending.go @@ -71,7 +71,11 @@ func (p *Pending) Wait() error { } if op.Error != nil { if len(op.Error.Errors) > 0 { - return fmt.Errorf("Operation %q failed: %+v", p.desc, op.Error.Errors) + var messages []string + for _, err := range op.Error.Errors { + messages = append(messages, err.Message) + } + return fmt.Errorf("Operation %q failed: %+v", p.desc, messages) } return fmt.Errorf("Operation %q failed to start", p.desc) } diff --git a/mantle/platform/api/ibmcloud/s3.go b/mantle/platform/api/ibmcloud/s3.go index 521a14a5..5dcd0352 100644 --- a/mantle/platform/api/ibmcloud/s3.go +++ b/mantle/platform/api/ibmcloud/s3.go @@ -130,10 +130,8 @@ func (a *API) checkIfObjectExists(objectName, bucketName string) bool { } _, err := a.s3client.s3Session.GetObject(input) - if err != nil { - return false - } - return true + // XXX: this should actually check the exact error returned + return err == nil } //UploadObject - upload to s3 bucket @@ -176,10 +174,9 @@ func (a *API) CopyObject(srcBucket, srcName, destBucket string) error { }) if err != nil { if awserr, ok := err.(awserr.Error); ok { - if awserr.Code() == "BucketAlreadyOwnedByYou" { - return nil - } + err = awserr } + return fmt.Errorf("Error copying object to bucket: %v", err) } // Wait to see if the item got copied diff --git a/mantle/platform/api/openstack/api.go b/mantle/platform/api/openstack/api.go index 5891eb5d..29961d45 100644 --- 
a/mantle/platform/api/openstack/api.go +++ b/mantle/platform/api/openstack/api.go @@ -305,7 +305,7 @@ func (a *API) CreateServer(name, sshKeyID, userdata string) (*Server, error) { // Previously we were timing out because it was taking 10+ minutes for // instances to come up in VexxHost. This helps with that. bootVolume := []bootfromvolume.BlockDevice{ - bootfromvolume.BlockDevice{ + { UUID: a.opts.Image, VolumeSize: 10, DeleteOnTermination: true, @@ -609,7 +609,7 @@ func (a *API) DeleteImage(imageID string, force bool) error { NewProtected: false, }, } - image, err = images.Update(a.imageClient, imageID, updateOpts).Extract() + _, err = images.Update(a.imageClient, imageID, updateOpts).Extract() if err != nil { return fmt.Errorf( "Error removing protection from image %s: %v", imageID, err) diff --git a/mantle/platform/api/packet/api.go b/mantle/platform/api/packet/api.go index 387519e7..9b587ee1 100644 --- a/mantle/platform/api/packet/api.go +++ b/mantle/platform/api/packet/api.go @@ -326,7 +326,7 @@ RequiredBy=multi-user.target }, Storage: ignition.Storage{ Files: []ignition.File{ - ignition.File{ + { Node: ignition.Node{ Path: "/var/userdata", }, @@ -341,22 +341,22 @@ RequiredBy=multi-user.target }, Systemd: ignition.Systemd{ Units: []ignition.Unit{ - ignition.Unit{ + { // don't appear to be running while install is in progress Name: "sshd.service", Mask: util.BoolToPtr(true), }, - ignition.Unit{ + { // allow remote detection of install in progress Name: "discard.socket", Enabled: util.BoolToPtr(true), Contents: util.StrToPtr(discardSocketUnit), }, - ignition.Unit{ + { Name: "discard@.service", Contents: util.StrToPtr(discardServiceUnit), }, - ignition.Unit{ + { Name: "coreos-installer.service", Enabled: util.BoolToPtr(true), Contents: util.StrToPtr(installUnit), diff --git a/mantle/platform/cluster.go b/mantle/platform/cluster.go index 53f749c0..ebdc3786 100644 --- a/mantle/platform/cluster.go +++ b/mantle/platform/cluster.go @@ -41,6 +41,12 @@ type BaseCluster 
struct { bf *BaseFlight name string rconf *RuntimeConfig + + // the number of machines running (have not been released) + // Note: numMachines <= len(machmap), since numMachines + // is decremented before the machine destroy process begins, and + // machmap is updated usually near the end. + numMachines int } func NewBaseCluster(bf *BaseFlight, rconf *RuntimeConfig) (*BaseCluster, error) { @@ -158,6 +164,7 @@ func (bc *BaseCluster) AddMach(m Machine) { if err := bc.appendSSH(m); err != nil { panic(err) } + bc.numMachines++ } func (bc *BaseCluster) DelMach(m Machine) { @@ -207,34 +214,29 @@ func (bc *BaseCluster) RenderUserData(userdata *platformConf.UserData, ignitionV conf.CopyKeys(keys) } - // disable Zincati & Pinger by default + // disable Zincati by default if bc.Distribution() == "fcos" { - conf.AddFile("/etc/fedora-coreos-pinger/config.d/90-disable-reporting.toml", `[reporting] -enabled = false`, 0644) conf.AddFile("/etc/zincati/config.d/90-disable-auto-updates.toml", `[updates] enabled = false`, 0644) } if bc.bf.baseopts.OSContainer != "" { - if bc.Distribution() != "rhcos" { - return nil, fmt.Errorf("oscontainer is only supported on the rhcos distribution") - } - conf.AddSystemdUnitDropin("pivot.service", "00-before-sshd.conf", `[Unit] -Before=sshd.service`) - conf.AddSystemdUnit("pivot.service", "", platformConf.Enable) - conf.AddSystemdUnit("pivot-write-reboot-needed.service", `[Unit] -Description=Touch /run/pivot/reboot-needed -ConditionFirstBoot=true + conf.AddSystemdUnit("kola-container-rebase.service", fmt.Sprintf(`[Unit] +Description=Rebase to target container +ConditionPathExists=!/etc/kola-rebase-done +Before=sshd.service +Wants=network-online.target +After=network-online.target [Service] Type=oneshot -ExecStart=/usr/bin/mkdir -p /run/pivot -ExecStart=/usr/bin/touch /run/pivot/reboot-needed +ExecStart=rpm-ostree rebase --experimental %s +ExecStart=touch /etc/kola-rebase-done +ExecStart=systemctl reboot [Install] WantedBy=multi-user.target -`, 
platformConf.Enable) - conf.AddFile("/etc/pivot/image-pullspec", bc.bf.baseopts.OSContainer, 0644) +`, bc.bf.baseopts.OSContainer), platformConf.Enable) } if conf.IsIgnition() { @@ -249,6 +251,7 @@ WantedBy=multi-user.target // Destroy destroys each machine in the cluster. func (bc *BaseCluster) Destroy() { for _, m := range bc.Machines() { + bc.numMachines-- m.Destroy() } } @@ -292,3 +295,11 @@ func (bc *BaseCluster) JournalOutput() map[string]string { } return ret } + +func (bc *BaseCluster) EarlyRelease() { + bc.machlock.Lock() + defer bc.machlock.Unlock() + if bc.rconf.EarlyRelease != nil && bc.numMachines == 0 { + bc.rconf.EarlyRelease() + } +} diff --git a/mantle/platform/conf/conf.go b/mantle/platform/conf/conf.go index 2190459b..76b216af 100644 --- a/mantle/platform/conf/conf.go +++ b/mantle/platform/conf/conf.go @@ -390,6 +390,9 @@ func (c *Conf) MaybeCompress() (string, error) { var buff bytes.Buffer config := c.String() writer, err := gzip.NewWriterLevel(&buff, gzip.BestCompression) + if err != nil { + return "", err + } defer writer.Close() if _, err := writer.Write([]byte(config)); err != nil { return "", err @@ -839,9 +842,9 @@ func (c *Conf) AddSystemdUnitDropin(service, name, contents string) { } } -func (c *Conf) addAuthorizedKeysV3(username string, keys []string) { +func (c *Conf) addAuthorizedKeysV3(username string, keys map[string]struct{}) { var keyObjs []v3types.SSHAuthorizedKey - for _, key := range keys { + for key := range keys { keyObjs = append(keyObjs, v3types.SSHAuthorizedKey(key)) } newConfig := v3types.Config{ @@ -860,9 +863,9 @@ func (c *Conf) addAuthorizedKeysV3(username string, keys []string) { c.MergeV3(newConfig) } -func (c *Conf) addAuthorizedKeysV31(username string, keys []string) { +func (c *Conf) addAuthorizedKeysV31(username string, keys map[string]struct{}) { var keyObjs []v31types.SSHAuthorizedKey - for _, key := range keys { + for key := range keys { keyObjs = append(keyObjs, v31types.SSHAuthorizedKey(key)) } newConfig := 
v31types.Config{ @@ -881,9 +884,9 @@ func (c *Conf) addAuthorizedKeysV31(username string, keys []string) { c.MergeV31(newConfig) } -func (c *Conf) addAuthorizedKeysV32(username string, keys []string) { +func (c *Conf) addAuthorizedKeysV32(username string, keys map[string]struct{}) { var keyObjs []v32types.SSHAuthorizedKey - for _, key := range keys { + for key := range keys { keyObjs = append(keyObjs, v32types.SSHAuthorizedKey(key)) } newConfig := v32types.Config{ @@ -902,9 +905,9 @@ func (c *Conf) addAuthorizedKeysV32(username string, keys []string) { c.MergeV32(newConfig) } -func (c *Conf) addAuthorizedKeysV33(username string, keys []string) { +func (c *Conf) addAuthorizedKeysV33(username string, keys map[string]struct{}) { var keyObjs []v33types.SSHAuthorizedKey - for _, key := range keys { + for key := range keys { keyObjs = append(keyObjs, v33types.SSHAuthorizedKey(key)) } newConfig := v33types.Config{ @@ -923,9 +926,9 @@ func (c *Conf) addAuthorizedKeysV33(username string, keys []string) { c.MergeV33(newConfig) } -func (c *Conf) addAuthorizedKeysV34exp(username string, keys []string) { +func (c *Conf) addAuthorizedKeysV34exp(username string, keys map[string]struct{}) { var keyObjs []v34exptypes.SSHAuthorizedKey - for _, key := range keys { + for key := range keys { keyObjs = append(keyObjs, v34exptypes.SSHAuthorizedKey(key)) } newConfig := v34exptypes.Config{ @@ -947,16 +950,21 @@ func (c *Conf) addAuthorizedKeysV34exp(username string, keys []string) { // AddAuthorizedKeys adds an Ignition config to add the given keys to the SSH // authorized_keys file for the given user. 
func (c *Conf) AddAuthorizedKeys(user string, keys []string) { + // make it into a set to dedupe any keys + keysSet := map[string]struct{}{} + for _, key := range keys { + keysSet[key] = struct{}{} + } if c.ignitionV3 != nil { - c.addAuthorizedKeysV3(user, keys) + c.addAuthorizedKeysV3(user, keysSet) } else if c.ignitionV31 != nil { - c.addAuthorizedKeysV31(user, keys) + c.addAuthorizedKeysV31(user, keysSet) } else if c.ignitionV32 != nil { - c.addAuthorizedKeysV32(user, keys) + c.addAuthorizedKeysV32(user, keysSet) } else if c.ignitionV33 != nil { - c.addAuthorizedKeysV33(user, keys) + c.addAuthorizedKeysV33(user, keysSet) } else if c.ignitionV34exp != nil { - c.addAuthorizedKeysV34exp(user, keys) + c.addAuthorizedKeysV34exp(user, keysSet) } } @@ -967,7 +975,7 @@ func (c *Conf) CopyKeys(keys []*agent.Key) { for _, key := range keys { keyStrs = append(keyStrs, key.String()) } - c.AddAuthorizedKeys("core", keyStrs) + c.AddAuthorizedKeys("nest", keyStrs) } func (c *Conf) addConfigSourceV3(source string) { @@ -976,7 +984,7 @@ func (c *Conf) addConfigSourceV3(source string) { Version: "3.0.0", Config: v3types.IgnitionConfig{ Merge: []v3types.ConfigReference{ - v3types.ConfigReference{ + { Source: &source, }, }, @@ -1103,7 +1111,7 @@ func (c *Conf) IsEmpty() bool { func getAutologinUnit(name, args string) string { return fmt.Sprintf(`[Service] ExecStart= - ExecStart=-/sbin/agetty --autologin core -o '-p -f core' %s %%I $TERM + ExecStart=-/sbin/agetty --autologin nest -o '-p -f nest' %s %%I $TERM `, args) } diff --git a/mantle/platform/conf/conf_test.go b/mantle/platform/conf/conf_test.go index 46e6cd89..d74276d9 100644 --- a/mantle/platform/conf/conf_test.go +++ b/mantle/platform/conf/conf_test.go @@ -74,7 +74,7 @@ func TestConfCopyKey(t *testing.T) { str := conf.String() - if !strings.Contains(str, "ecdsa-sha2-nistp256 ") || !strings.Contains(str, " core@default") { + if !strings.Contains(str, "ssh-rsa ") || !strings.Contains(str, " nest@default") { t.Errorf("ssh 
public key not found in config %d: %s", i, str) continue } diff --git a/mantle/platform/machine/aws/cluster.go b/mantle/platform/machine/aws/cluster.go index b3dbce14..700fe629 100644 --- a/mantle/platform/machine/aws/cluster.go +++ b/mantle/platform/machine/aws/cluster.go @@ -83,7 +83,7 @@ func (ac *cluster) NewMachineWithOptions(userdata *conf.UserData, options platfo fmt.Printf("WARNING: compressed userdata exceeds expected limit of %d\n", MaxUserDataSize) } } - instances, err := ac.flight.api.CreateInstances(ac.Name(), keyname, ud, 1, int64(options.MinDiskSize)) + instances, err := ac.flight.api.CreateInstances(ac.Name(), keyname, ud, 1, int64(options.MinDiskSize), !ac.RuntimeConf().NoInstanceCreds) if err != nil { return nil, err } diff --git a/mantle/platform/machine/aws/flight.go b/mantle/platform/machine/aws/flight.go index 1805085f..cdf6e2ab 100644 --- a/mantle/platform/machine/aws/flight.go +++ b/mantle/platform/machine/aws/flight.go @@ -61,19 +61,12 @@ func NewFlight(opts *aws.Options) (platform.Flight, error) { api: api, } - // We have worked around the golang library limitation for - // keyexchange algorithm by switching to an ecdsa key in - // network/ssh.go. However, AWS requires an rsa key. For now - // (until we get an updated golang library) we'll just satisfy - // the requirement by using a fake key and disabling the - // fcos.ignition.misc.empty and fcos.ignition.v3.noop tests on AWS. 
- // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html#how-to-generate-your-own-key-and-import-it-to-aws - key, err := platform.GenerateFakeKey() + keys, err := af.Keys() if err != nil { af.Destroy() return nil, err } - if err := api.AddKey(af.Name(), key); err != nil { + if err := api.AddKey(af.Name(), keys[0].String()); err != nil { af.Destroy() return nil, err } diff --git a/mantle/platform/machine/aws/machine.go b/mantle/platform/machine/aws/machine.go index 89dc8a6f..929ceddc 100644 --- a/mantle/platform/machine/aws/machine.go +++ b/mantle/platform/machine/aws/machine.go @@ -94,6 +94,7 @@ func (am *machine) Destroy() { am.journal.Destroy() } + am.cluster.EarlyRelease() if err := am.saveConsole(origConsole); err != nil { plog.Errorf("Error saving console for instance %v: %v", am.ID(), err) } diff --git a/mantle/platform/machine/azure/cluster.go b/mantle/platform/machine/azure/cluster.go index 32df1061..4dd74965 100644 --- a/mantle/platform/machine/azure/cluster.go +++ b/mantle/platform/machine/azure/cluster.go @@ -67,12 +67,7 @@ func (ac *cluster) NewMachineWithOptions(userdata *conf.UserData, options platfo return nil, err } - sshkey := "" - if !ac.RuntimeConf().NoSSHKeyInMetadata { - sshkey = ac.sshKey - } - - instance, err := ac.flight.api.CreateInstance(ac.vmname(), conf.String(), sshkey, ac.ResourceGroup, ac.StorageAccount) + instance, err := ac.flight.api.CreateInstance(ac.vmname(), conf.String(), ac.sshKey, ac.ResourceGroup, ac.StorageAccount) if err != nil { return nil, err } diff --git a/mantle/platform/machine/azure/flight.go b/mantle/platform/machine/azure/flight.go index d7c84fc1..62a801d2 100644 --- a/mantle/platform/machine/azure/flight.go +++ b/mantle/platform/machine/azure/flight.go @@ -33,9 +33,8 @@ var ( type flight struct { *platform.BaseFlight - api *azure.API - SSHKey string - FakeSSHKey string + api *azure.API + SSHKey string } // NewFlight creates an instance of a Flight suitable for spawning @@ -66,10 +65,6 @@ func 
NewFlight(opts *azure.Options) (platform.Flight, error) { return nil, err } af.SSHKey = keys[0].String() - af.FakeSSHKey, err = platform.GenerateFakeKey() - if err != nil { - return nil, err - } return af, nil } @@ -87,17 +82,9 @@ func (af *flight) NewCluster(rconf *platform.RuntimeConfig) (platform.Cluster, e flight: af, } - // We have worked around the golang library limitation for - // keyexchange algorithm by switching to an ecdsa key in - // network/ssh.go. However, Azure requires an RSA key. For - // now (until we get an updated golang library) we'll just - // satisfy the requirement by using a fake key and disabling - // the fcos.ignition.misc.empty and fcos.ignition.v3.noop - // tests on Azure. - // https://github.com/coreos/coreos-assembler/issues/1772 - // https://docs.microsoft.com/en-us/azure/virtual-machines/linux/mac-create-ssh-keys#supported-ssh-key-formats - ac.sshKey = af.FakeSSHKey - //ac.sshKey = af.SSHKey + if !rconf.NoSSHKeyInMetadata { + ac.sshKey = af.SSHKey + } ac.ResourceGroup, err = af.api.CreateResourceGroup("kola-cluster") if err != nil { @@ -106,11 +93,17 @@ func (af *flight) NewCluster(rconf *platform.RuntimeConfig) (platform.Cluster, e ac.StorageAccount, err = af.api.CreateStorageAccount(ac.ResourceGroup) if err != nil { + if e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil { + plog.Errorf("Deleting resource group %v: %v", ac.ResourceGroup, e) + } return nil, err } _, err = af.api.PrepareNetworkResources(ac.ResourceGroup) if err != nil { + if e := af.api.TerminateResourceGroup(ac.ResourceGroup); e != nil { + plog.Errorf("Deleting resource group %v: %v", ac.ResourceGroup, e) + } return nil, err } diff --git a/mantle/platform/machine/gcloud/cluster.go b/mantle/platform/machine/gcloud/cluster.go index bdabd886..ff0ab8be 100644 --- a/mantle/platform/machine/gcloud/cluster.go +++ b/mantle/platform/machine/gcloud/cluster.go @@ -69,7 +69,7 @@ func (gc *cluster) NewMachineWithOptions(userdata *conf.UserData, options platfo } } - 
instance, err := gc.flight.api.CreateInstance(conf.String(), keys) + instance, err := gc.flight.api.CreateInstance(conf.String(), keys, !gc.RuntimeConf().NoInstanceCreds) if err != nil { return nil, err } diff --git a/mantle/platform/machine/qemuiso/cluster.go b/mantle/platform/machine/qemuiso/cluster.go index d3008ccb..e53c5fdd 100644 --- a/mantle/platform/machine/qemuiso/cluster.go +++ b/mantle/platform/machine/qemuiso/cluster.go @@ -119,7 +119,9 @@ func (qc *Cluster) NewMachineWithQemuOptions(userdata *conf.UserData, options pl return nil, errors.Wrapf(err, "adding ISO image") } - builder.AddDisksFromSpecs(options.AdditionalDisks) + if err = builder.AddDisksFromSpecs(options.AdditionalDisks); err != nil { + return nil, err + } if len(options.HostForwardPorts) > 0 { builder.EnableUsermodeNetworking(options.HostForwardPorts) diff --git a/mantle/platform/machine/unprivqemu/cluster.go b/mantle/platform/machine/unprivqemu/cluster.go index 8a8828ff..3acd6d3f 100644 --- a/mantle/platform/machine/unprivqemu/cluster.go +++ b/mantle/platform/machine/unprivqemu/cluster.go @@ -143,11 +143,12 @@ func (qc *Cluster) NewMachineWithQemuOptions(userdata *conf.UserData, options pl primaryDisk.BackingFile = options.OverrideBackingFile } - err = builder.AddBootDisk(&primaryDisk) - if err != nil { + if err = builder.AddBootDisk(&primaryDisk); err != nil { + return nil, err + } + if err = builder.AddDisksFromSpecs(options.AdditionalDisks); err != nil { return nil, err } - builder.AddDisksFromSpecs(options.AdditionalDisks) if len(options.HostForwardPorts) > 0 { builder.EnableUsermodeNetworking(options.HostForwardPorts) diff --git a/mantle/platform/metal.go b/mantle/platform/metal.go index d4fb2d81..c5a4f435 100644 --- a/mantle/platform/metal.go +++ b/mantle/platform/metal.go @@ -24,14 +24,15 @@ import ( "os" "path/filepath" "strings" + "time" + coreosarch "github.com/coreos/stream-metadata-go/arch" "github.com/pkg/errors" "gopkg.in/yaml.v2" "github.com/coreos/mantle/platform/conf" - 
"github.com/coreos/mantle/sdk" - "github.com/coreos/mantle/system" "github.com/coreos/mantle/system/exec" + "github.com/coreos/mantle/util" ) const ( @@ -78,7 +79,7 @@ func NewMetalQemuBuilderDefault() *QemuBuilder { } type Install struct { - CosaBuild *sdk.LocalBuild + CosaBuild *util.LocalBuild Builder *QemuBuilder Insecure bool Native4k bool @@ -98,12 +99,28 @@ type InstalledMachine struct { BootStartedErrorChannel chan error } -func (inst *Install) PXE(kargs []string, liveIgnition, ignition conf.Conf, offline bool) (*InstalledMachine, error) { - if inst.CosaBuild.Meta.BuildArtifacts.Metal == nil { - return nil, fmt.Errorf("Build %s must have a `metal` artifact", inst.CosaBuild.Meta.OstreeVersion) +// Check that artifact has been built and locally exists +func (inst *Install) checkArtifactsExist(artifacts []string) error { + version := inst.CosaBuild.Meta.OstreeVersion + for _, name := range artifacts { + artifact, err := inst.CosaBuild.Meta.GetArtifact(name) + if err != nil { + return fmt.Errorf("Missing artifact %s for %s build: %s", name, version, err) + } + path := filepath.Join(inst.CosaBuild.Dir, artifact.Path) + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("Missing local file for artifact %s for build %s", name, version) + } + } } - if inst.CosaBuild.Meta.BuildArtifacts.LiveKernel == nil { - return nil, fmt.Errorf("build %s has no live installer kernel", inst.CosaBuild.Meta.Name) + return nil +} + +func (inst *Install) PXE(kargs []string, liveIgnition, ignition conf.Conf, offline bool) (*InstalledMachine, error) { + artifacts := []string{"live-kernel", "live-rootfs"} + if err := inst.checkArtifactsExist(artifacts); err != nil { + return nil, err } inst.kargs = kargs @@ -182,14 +199,14 @@ func setupMetalImage(builddir, metalimg, destdir string) (string, error) { } func (inst *Install) setup(kern *kernelSetup) (*installerRun, error) { - if kern.kernel == "" { - return nil, fmt.Errorf("Missing kernel artifact") - } - 
if kern.initramfs == "" { - return nil, fmt.Errorf("Missing initramfs artifact") + var artifacts []string + if inst.Native4k { + artifacts = append(artifacts, "metal4k") + } else { + artifacts = append(artifacts, "metal") } - if kern.rootfs == "" { - return nil, fmt.Errorf("Missing rootfs artifact") + if err := inst.checkArtifactsExist(artifacts); err != nil { + return nil, err } builder := inst.Builder @@ -251,7 +268,7 @@ func (inst *Install) setup(kern *kernelSetup) (*installerRun, error) { pxe := pxeSetup{} pxe.tftpipaddr = "192.168.76.2" - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "x86_64": pxe.networkdevice = "e1000" if builder.Firmware == "uefi" { @@ -281,7 +298,7 @@ func (inst *Install) setup(kern *kernelSetup) (*installerRun, error) { pxe.tftpipaddr = "10.0.2.2" pxe.bootindex = "1" default: - return nil, fmt.Errorf("Unsupported arch %s" + system.RpmArch()) + return nil, fmt.Errorf("Unsupported arch %s" + coreosarch.CurrentRpmArch()) } mux := http.NewServeMux() @@ -317,7 +334,7 @@ func (inst *Install) setup(kern *kernelSetup) (*installerRun, error) { } func renderBaseKargs() []string { - return append(baseKargs, fmt.Sprintf("console=%s", consoleKernelArgument[system.RpmArch()])) + return append(baseKargs, fmt.Sprintf("console=%s", consoleKernelArgument[coreosarch.CurrentRpmArch()])) } func renderInstallKargs(t *installerRun, offline bool) []string { @@ -362,7 +379,7 @@ func (t *installerRun) completePxeSetup(kargs []string) error { KERNEL %s APPEND initrd=%s %s `, t.kern.kernel, t.kern.initramfs, kargsStr)) - if system.RpmArch() == "s390x" { + if coreosarch.CurrentRpmArch() == "s390x" { pxeconfig = []byte(kargsStr) } if err := ioutil.WriteFile(filepath.Join(pxeconfigdir, "default"), pxeconfig, 0777); err != nil { @@ -419,11 +436,25 @@ func (t *installerRun) completePxeSetup(kargs []string) error { func switchBootOrderSignal(qinst *QemuInstance, bootstartedchan *os.File, booterrchan *chan error) { *booterrchan = make(chan error) + 
go func() { + err := qinst.Wait() + // only one Wait() gets process data, so also manually check for signal + if err == nil && qinst.Signaled() { + err = errors.New("process killed") + } + if err != nil { + *booterrchan <- errors.Wrapf(err, "QEMU unexpectedly exited while waiting for %s", bootStartedSignal) + } + }() go func() { r := bufio.NewReader(bootstartedchan) l, err := r.ReadString('\n') if err != nil { if err == io.EOF { + // this may be from QEMU getting killed or exiting; wait a bit + // to give a chance for .Wait() above to feed the channel with a + // better error + time.Sleep(1 * time.Second) *booterrchan <- fmt.Errorf("Got EOF from boot started channel, %s expected", bootStartedSignal) } else { *booterrchan <- errors.Wrapf(err, "reading from boot started channel") @@ -467,7 +498,7 @@ func (t *installerRun) run() (*QemuInstance, error) { builder := t.builder netdev := fmt.Sprintf("%s,netdev=mynet0,mac=52:54:00:12:34:56", t.pxe.networkdevice) if t.pxe.bootindex == "" { - builder.Append("-boot", "once=n", "-option-rom", "/usr/share/qemu/pxe-rtl8139.rom") + builder.Append("-boot", "once=n") } else { netdev += fmt.Sprintf(",bootindex=%s", t.pxe.bootindex) } @@ -531,13 +562,14 @@ type installerConfig struct { } func (inst *Install) InstallViaISOEmbed(kargs []string, liveIgnition, targetIgnition conf.Conf, outdir string, offline, minimal bool) (*InstalledMachine, error) { - if !inst.Native4k && inst.CosaBuild.Meta.BuildArtifacts.Metal == nil { - return nil, fmt.Errorf("Build %s must have a `metal` artifact", inst.CosaBuild.Meta.OstreeVersion) - } else if inst.Native4k && inst.CosaBuild.Meta.BuildArtifacts.Metal4KNative == nil { - return nil, fmt.Errorf("Build %s must have a `metal4k` artifact", inst.CosaBuild.Meta.OstreeVersion) + artifacts := []string{"live-iso"} + if inst.Native4k { + artifacts = append(artifacts, "metal4k") + } else { + artifacts = append(artifacts, "metal") } - if inst.CosaBuild.Meta.BuildArtifacts.LiveIso == nil { - return nil, 
fmt.Errorf("Build %s must have a live ISO", inst.CosaBuild.Meta.Name) + if err := inst.checkArtifactsExist(artifacts); err != nil { + return nil, err } if minimal && offline { // ideally this'd be one enum parameter panic("Can't run minimal install offline") diff --git a/mantle/platform/platform.go b/mantle/platform/platform.go index 405faafc..767e21c2 100644 --- a/mantle/platform/platform.go +++ b/mantle/platform/platform.go @@ -21,6 +21,7 @@ import ( "io" "os/exec" "path/filepath" + "strconv" "strings" "sync" "time" @@ -198,11 +199,16 @@ type RuntimeConfig struct { NoSSHKeyInUserData bool // don't inject SSH key into Ignition/cloud-config NoSSHKeyInMetadata bool // don't add SSH key to platform metadata + NoInstanceCreds bool // don't grant credentials (AWS instance profile, GCP service account) to the instance AllowFailedUnits bool // don't fail CheckMachine if a systemd unit has failed WarningsAction conf.WarningsAction // what to do on Ignition or Butane validation warnings // InternetAccess is true if the cluster should be Internet connected InternetAccess bool + EarlyRelease func() + + // whether a Manhole into a machine should be created on detected failure + SSHOnTestFailure bool } // Wrap a StdoutPipe as a io.ReadCloser @@ -430,12 +436,33 @@ func checkSystemdUnitFailures(output string, distribution string) error { } } if len(failedUnits) > 0 { - return fmt.Errorf("some systemd units failed:\n%s", output) + return fmt.Errorf("some systemd units failed: %s", failedUnits) } return nil } +// checkSystemdUnitStuck ensures that no system unit stuck in activating state. 
+// https://github.com/coreos/coreos-assembler/issues/2798 +// See https://bugzilla.redhat.com/show_bug.cgi?id=2072050 +func checkSystemdUnitStuck(output string, m Machine) error { + if len(output) == 0 { + return nil + } + var NRestarts int + for _, unit := range strings.Split(output, "\n") { + out, stderr, err := m.SSH(fmt.Sprintf("systemctl show -p NRestarts --value %s", unit)) + if err != nil { + return fmt.Errorf("failed to query systemd unit NRestarts: %s: %v: %s", out, err, stderr) + } + NRestarts, _ = strconv.Atoi(string(out)) + if NRestarts >= 2 { + return fmt.Errorf("systemd units %s has %v restarts", unit, NRestarts) + } + } + return nil +} + // CheckMachine tests a machine for various error conditions such as ssh // being available and no systemd units failing at the time ssh is reachable. // It also ensures the remote system is running Container Linux by CoreOS or @@ -468,26 +495,67 @@ func CheckMachine(ctx context.Context, m Machine) error { // ensure we're talking to a supported system var distribution string switch string(out) { - case `NestOS-nestos`: + case `nestos-container`, `NestOS-nestos`: distribution = "nestos" + // Reserved for older versions of NestOS case `fedora-coreos`: distribution = "fcos" - case `centos-coreos`, `rhcos-`: + case `scos-`, `scos-coreos`, `rhcos-`, `rhcos-coreos`: distribution = "rhcos" default: return fmt.Errorf("not a supported instance: %v", string(out)) } - if !m.RuntimeConf().AllowFailedUnits { - // Ensure no systemd units failed during boot - out, stderr, err = m.SSH("busctl --json=short call org.freedesktop.systemd1 /org/freedesktop/systemd1 org.freedesktop.systemd1.Manager ListUnitsFiltered as 2 state failed | jq -r '.data[][][0]'") - if err != nil { - return fmt.Errorf("failed to query systemd for failed units: %s: %v: %s", out, err, stderr) - } - err = checkSystemdUnitFailures(string(out), distribution) - if err != nil { - return err + // check systemd version on host to see if we can use `busctl --json=short` 
+ var systemdVer int + var systemdCmd, failedUnitsCmd, activatingUnitsCmd string + var systemdFailures bool + minSystemdVer := 240 + out, stderr, err = m.SSH("rpm -q --queryformat='%{VERSION}\n' systemd") + if err != nil { + return fmt.Errorf("failed to query systemd RPM for version: %s: %v: %s", out, err, stderr) + } + // Fedora can use XXX.Y as a version string, so just use the major version + systemdVer, _ = strconv.Atoi(string(out[0:3])) + + if systemdVer >= minSystemdVer { + systemdCmd = "busctl --json=short call org.freedesktop.systemd1 /org/freedesktop/systemd1 org.freedesktop.systemd1.Manager ListUnitsFiltered as 2 state status | jq -r '.data[][][0]'" + } else { + systemdCmd = "systemctl --no-legend --state status list-units | awk '{print $1}'" + } + failedUnitsCmd = strings.Replace(systemdCmd, "status", "failed", -1) + activatingUnitsCmd = strings.Replace(systemdCmd, "status", "activating", -1) + + // Ensure no systemd units failed during boot + out, stderr, err = m.SSH(failedUnitsCmd) + if err != nil { + return fmt.Errorf("failed to query systemd for failed units: %s: %v: %s", out, err, stderr) + } + err = checkSystemdUnitFailures(string(out), distribution) + if err != nil { + plog.Error(err) + systemdFailures = true + } + + // Ensure no systemd units stuck in activating state + out, stderr, err = m.SSH(activatingUnitsCmd) + if err != nil { + return fmt.Errorf("failed to query systemd for activating units: %s: %v: %s", out, err, stderr) + } + err = checkSystemdUnitStuck(string(out), m) + if err != nil { + plog.Error(err) + systemdFailures = true + } + + if systemdFailures && !m.RuntimeConf().AllowFailedUnits { + if m.RuntimeConf().SSHOnTestFailure { + plog.Error("dropping to shell: detected failed or stuck systemd units") + if err := Manhole(m); err != nil { + plog.Errorf("failed to get terminal via ssh: %v", err) + } } + return fmt.Errorf("detected failed or stuck systemd units") } return ctx.Err() diff --git a/mantle/platform/qemu.go 
b/mantle/platform/qemu.go index 1decb182..015356c5 100644 --- a/mantle/platform/qemu.go +++ b/mantle/platform/qemu.go @@ -45,11 +45,14 @@ import ( "github.com/coreos/mantle/platform/conf" "github.com/coreos/mantle/util" + coreosarch "github.com/coreos/stream-metadata-go/arch" "github.com/digitalocean/go-qemu/qmp" "github.com/coreos/mantle/system" "github.com/coreos/mantle/system/exec" "github.com/pkg/errors" + + "golang.org/x/sys/unix" ) var ( @@ -144,6 +147,11 @@ type QemuInstance struct { qmpSocketPath string } +// Signaled returns whether QEMU process was signaled. +func (inst *QemuInstance) Signaled() bool { + return inst.qemu.Signaled() +} + // Pid returns the PID of QEMU process. func (inst *QemuInstance) Pid() int { return inst.qemu.Pid() @@ -287,7 +295,7 @@ func (inst *QemuInstance) Destroy() { // is used to boot from the network device (boot once is not supported). For s390x, the boot ordering was not a problem as it // would always read from disk first. For aarch64, the bootindex needs to be switched to boot from disk before a reboot func (inst *QemuInstance) SwitchBootOrder() (err2 error) { - if system.RpmArch() != "s390x" && system.RpmArch() != "aarch64" { + if coreosarch.CurrentRpmArch() != "s390x" && coreosarch.CurrentRpmArch() != "aarch64" { //Not applicable for other arches return nil } @@ -312,7 +320,7 @@ func (inst *QemuInstance) SwitchBootOrder() (err2 error) { } // Get boot device (for iso-installs) and block device for _, dev := range blkdevs.Return { - devpath := filepath.Clean(strings.Trim(dev.DevicePath, "virtio-backend")) + devpath := filepath.Clean(strings.TrimSuffix(dev.DevicePath, "virtio-backend")) switch dev.Device { case "installiso": bootdev = devpath @@ -520,13 +528,13 @@ func (builder *QemuBuilder) AddFd(fd *os.File) string { // virtio returns a virtio device argument for qemu, which is architecture dependent func virtio(device, args string) string { var suffix string - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() 
{ case "x86_64", "ppc64le", "aarch64": suffix = "pci" case "s390x": suffix = "ccw" default: - panic(fmt.Sprintf("RpmArch %s unhandled in virtio()", system.RpmArch())) + panic(fmt.Sprintf("RpmArch %s unhandled in virtio()", coreosarch.CurrentRpmArch())) } return fmt.Sprintf("virtio-%s-%s,%s", device, suffix, args) } @@ -602,7 +610,7 @@ func (builder *QemuBuilder) Mount9p(source, destHint string, readonly bool) { // supportsFwCfg if the target system supports injecting // Ignition via the qemu -fw_cfg option. func (builder *QemuBuilder) supportsFwCfg() bool { - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "s390x", "ppc64le": return false } @@ -611,8 +619,9 @@ func (builder *QemuBuilder) supportsFwCfg() bool { // supportsSwtpm if the target system supports a virtual TPM device func (builder *QemuBuilder) supportsSwtpm() bool { - if system.RpmArch() == "s390x" { - // ppc64le and aarch64 support TPM as of f33. s390x does not support a backend for TPM + switch coreosarch.CurrentRpmArch() { + case "s390x": + // s390x does not support a backend for TPM return false } return true @@ -657,10 +666,20 @@ func newGuestfish(diskImagePath string, diskSectorSize int) (*coreosGuestfish, e guestfishArgs = append(guestfishArgs, "-a", diskImagePath) cmd := exec.Command("guestfish", guestfishArgs...) cmd.Env = append(os.Environ(), "LIBGUESTFS_BACKEND=direct") - switch system.RpmArch() { + + // Hack to run with a wrapper on older P8 hardware running RHEL7 + switch coreosarch.CurrentRpmArch() { case "ppc64le": - cmd.Env = append(os.Environ(), "LIBGUESTFS_HV=/usr/lib/coreos-assembler/libguestfs-ppc64le-wrapper.sh") + u := unix.Utsname{} + if err := unix.Uname(&u); err != nil { + return nil, errors.Wrapf(err, "detecting kernel information") + } + if strings.Contains(fmt.Sprintf("%s", u.Release), "el7") { + plog.Infof("Detected el7. 
Running using libguestfs-ppc64le-wrapper.sh") + cmd.Env = append(cmd.Env, "LIBGUESTFS_HV=/usr/lib/coreos-assembler/libguestfs-ppc64le-wrapper.sh") + } } + // make sure it inherits stderr so we see any error message cmd.Stderr = os.Stderr stdout, err := cmd.StdoutPipe() @@ -930,17 +949,16 @@ func (builder *QemuBuilder) addDiskImpl(disk *Disk, primary bool) error { // Each disk is presented on its own controller. // The WWN needs to be a unique uint64 number - rand.Seed(time.Now().UnixNano()) wwn := rand.Uint64() var bus string - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "x86_64", "ppc64le", "aarch64": bus = "pci" case "s390x": bus = "ccw" default: - panic(fmt.Sprintf("Mantle doesn't know which bus type to use on %s", system.RpmArch())) + panic(fmt.Sprintf("Mantle doesn't know which bus type to use on %s", coreosarch.CurrentRpmArch())) } for i := 0; i < 2; i++ { @@ -1036,10 +1054,22 @@ func (builder *QemuBuilder) finalize() { return } if builder.Memory == 0 { - memory := 10240 - switch system.RpmArch() { + // FIXME; Required memory should really be a property of the tests, and + // let's try to drop these arch-specific overrides. ARM was bumped via + // commit 09391907c0b25726374004669fa6c2b161e3892f + // Commit: Geoff Levand + // CommitDate: Mon Aug 21 12:39:34 2017 -0700 + // + // kola: More memory for arm64 qemu guest machines + // + // arm64 guest machines seem to run out of memory with 1024 MiB of + // RAM, so increase to 2048 MiB. + + // Then later, other non-x86_64 seemed to just copy that. 
+ memory := 1024 + switch coreosarch.CurrentRpmArch() { case "aarch64", "s390x", "ppc64le": - memory = 10240 + memory = 2048 } builder.Memory = memory } @@ -1061,7 +1091,7 @@ func baseQemuArgs() []string { kvm = false } var ret []string - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "x86_64": ret = []string{ "qemu-system-x86_64", @@ -1083,16 +1113,23 @@ func baseQemuArgs() []string { "-machine", "pseries,kvm-type=HV,vsmt=8,cap-fwnmi=off," + accel, } default: - panic(fmt.Sprintf("RpmArch %s combo not supported for qemu ", system.RpmArch())) + panic(fmt.Sprintf("RpmArch %s combo not supported for qemu ", coreosarch.CurrentRpmArch())) } if kvm { ret = append(ret, "-cpu", "host") + } else { + if coreosarch.CurrentRpmArch() == "x86_64" { + // the default qemu64 CPU model does not support x86_64_v2 + // causing crashes on EL9+ kernels + // see https://bugzilla.redhat.com/show_bug.cgi?id=2060839 + ret = append(ret, "-cpu", "Nehalem") + } } return ret } func (builder *QemuBuilder) setupUefi(secureBoot bool) error { - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "x86_64": varsVariant := "" if secureBoot { @@ -1121,7 +1158,7 @@ func (builder *QemuBuilder) setupUefi(secureBoot bool) error { builder.Append("-machine", "q35") case "aarch64": if secureBoot { - return fmt.Errorf("architecture %s doesn't have support for secure boot in kola", system.RpmArch()) + return fmt.Errorf("architecture %s doesn't have support for secure boot in kola", coreosarch.CurrentRpmArch()) } vars, err := ioutil.TempFile("", "mantle-qemu") if err != nil { @@ -1139,10 +1176,10 @@ func (builder *QemuBuilder) setupUefi(secureBoot bool) error { } fdset := builder.AddFd(vars) - builder.Append("-drive", "file=/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw,if=pflash,format=raw,unit=0,readonly=on,auto-read-only=off") + builder.Append("-drive", "file=/usr/share/edk2/aarch64/QEMU_EFI-silent-pflash.raw,if=pflash,format=raw,unit=0,readonly=on,auto-read-only=off") 
builder.Append("-drive", fmt.Sprintf("file=%s,if=pflash,format=raw,unit=1,readonly=off,auto-read-only=off", fdset)) default: - panic(fmt.Sprintf("Architecture %s doesn't have support for UEFI in qemu.", system.RpmArch())) + panic(fmt.Sprintf("Architecture %s doesn't have support for UEFI in qemu.", coreosarch.CurrentRpmArch())) } return nil @@ -1196,7 +1233,7 @@ func (builder *QemuBuilder) setupIso() error { if kargsSupported, err := coreosInstallerSupportsISOKargs(); err != nil { return err } else if kargsSupported { - allargs := fmt.Sprintf("console=%s %s", consoleKernelArgument[system.RpmArch()], builder.AppendKernelArgs) + allargs := fmt.Sprintf("console=%s %s", consoleKernelArgument[coreosarch.CurrentRpmArch()], builder.AppendKernelArgs) instCmdKargs := exec.Command("nestos-installer", "iso", "kargs", "modify", "--append", allargs, isoEmbeddedPath) var stderrb bytes.Buffer instCmdKargs.Stderr = &stderrb @@ -1204,11 +1241,11 @@ func (builder *QemuBuilder) setupIso() error { // Don't make this a hard error if it's just for console; we // may be operating on an old live ISO if len(builder.AppendKernelArgs) > 0 { - return errors.Wrapf(err, "running `nestos-installer iso kargs modify`; old CoreOS ISO?") + return errors.Wrapf(err, "running `nestos-installer iso kargs modify`; old NestOS ISO?") } stderr := stderrb.String() plog.Warningf("running nestos-installer iso kargs modify: %v: %q", err, stderr) - plog.Warning("likely targeting an old CoreOS ISO; ignoring...") + plog.Warning("likely targeting an old NestOS ISO; ignoring...") } } else if len(builder.AppendKernelArgs) > 0 { return fmt.Errorf("nestos-installer does not support appending kernel args") @@ -1238,7 +1275,7 @@ func (builder *QemuBuilder) setupIso() error { // will fall back to the ISO boot. On reboot when the system is installed, the // primary disk is selected. This allows us to have "boot once" functionality on // both UEFI and BIOS (`-boot once=d` OTOH doesn't work with OVMF). 
- switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "s390x", "ppc64le", "aarch64": if builder.isoAsDisk { // we could do it, but boot would fail @@ -1297,10 +1334,10 @@ func (builder *QemuBuilder) SerialPipe() (*os.File, error) { // VirtioJournal configures the OS and VM to stream the systemd journal // (post-switchroot) over a virtio-serial channel. -// - The first parameter is a poitner to the configuration of the target VM. -// - The second parameter is an optional queryArguments to filter the stream - -// see `man journalctl` for more information. -// - The return value is a file stream which will be newline-separated JSON. +// - The first parameter is a poitner to the configuration of the target VM. +// - The second parameter is an optional queryArguments to filter the stream - +// see `man journalctl` for more information. +// - The return value is a file stream which will be newline-separated JSON. func (builder *QemuBuilder) VirtioJournal(config *conf.Conf, queryArguments string) (*os.File, error) { stream, err := builder.VirtioChannelRead("mantlejournal") if err != nil { @@ -1481,7 +1518,7 @@ func (builder *QemuBuilder) Exec() (*QemuInstance, error) { } argv = append(argv, "-chardev", fmt.Sprintf("socket,id=chrtpm,path=%s", swtpmSock), "-tpmdev", "emulator,id=tpm0,chardev=chrtpm") // There are different device backends on each architecture - switch system.RpmArch() { + switch coreosarch.CurrentRpmArch() { case "x86_64": argv = append(argv, "-device", "tpm-tis,tpmdev=tpm0") case "aarch64": diff --git a/mantle/platform/util.go b/mantle/platform/util.go index 1e25a1da..ef3bd7ae 100644 --- a/mantle/platform/util.go +++ b/mantle/platform/util.go @@ -27,7 +27,7 @@ import ( "github.com/pkg/errors" "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) // Manhole connects os.Stdin, os.Stdout, and os.Stderr to an interactive shell @@ -36,13 +36,13 @@ import ( // error. 
func Manhole(m Machine) (err error) { fd := int(os.Stdin.Fd()) - if !terminal.IsTerminal(fd) { + if !term.IsTerminal(fd) { return nil } - tstate, _ := terminal.MakeRaw(fd) + tstate, _ := term.MakeRaw(fd) defer func() { - e := terminal.Restore(fd, tstate) + e := term.Restore(fd, tstate) if err != nil { err = fmt.Errorf("%v; %v", err, e) } else { @@ -73,7 +73,7 @@ func Manhole(m Machine) (err error) { ssh.TTY_OP_OSPEED: 115200, } - cols, lines, err := terminal.GetSize(int(os.Stdin.Fd())) + cols, lines, err := term.GetSize(int(os.Stdin.Fd())) if err != nil { return err } diff --git a/mantle/rhcos/metadata.go b/mantle/rhcos/metadata.go index d1e16d96..ec7529ee 100644 --- a/mantle/rhcos/metadata.go +++ b/mantle/rhcos/metadata.go @@ -21,9 +21,8 @@ import ( "net/http" "net/url" + coreosarch "github.com/coreos/stream-metadata-go/arch" "github.com/coreos/stream-metadata-go/stream" - - "github.com/coreos/mantle/system" ) const ( @@ -86,5 +85,5 @@ func FetchStreamArtifacts(stream, architecture string) (*stream.Arch, error) { // FetchStreamThisArchitecture returns artifacts for the current architecture from // the given stream. func FetchStreamThisArchitecture(stream string) (*stream.Arch, error) { - return FetchStreamArtifacts(stream, system.RpmArch()) + return FetchStreamArtifacts(stream, coreosarch.CurrentRpmArch()) } diff --git a/mantle/sdk/const.go b/mantle/sdk/const.go deleted file mode 100644 index 9bde87d1..00000000 --- a/mantle/sdk/const.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package sdk - -import ( - "github.com/pborman/uuid" -) - -// Partition UUIDs for CoreOS systems. -var ( - USRAUUID = uuid.UUID{0x71, 0x30, 0xC9, 0x4A, 0x21, 0x3A, 0x4E, 0x5A, 0x8E, 0x26, 0x6C, 0xCE, 0x96, 0x62, 0xF1, 0x32} - USRBUUID = uuid.UUID{0xE0, 0x3D, 0xD3, 0x5C, 0x7C, 0x2D, 0x4A, 0x47, 0xB3, 0xFE, 0x27, 0xF1, 0x57, 0x80, 0xA5, 0x7C} -) diff --git a/mantle/sdk/download.go b/mantle/sdk/download.go deleted file mode 100644 index ec224757..00000000 --- a/mantle/sdk/download.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sdk - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/coreos/pkg/capnslog" - "google.golang.org/api/option" - "google.golang.org/api/storage/v1" - - "github.com/coreos/mantle/util" -) - -var plog = capnslog.NewPackageLogger("github.com/coreos/mantle", "sdk") - -func DownloadFile(file, fileURL string, client *http.Client) error { - plog.Infof("Downloading %s to %s", fileURL, file) - - // handle bucket urls by using api to get media link - parseURL, err := url.Parse(fileURL) - if err != nil { - return err - } - resolveGS := func() error { - if client == nil { - client = http.DefaultClient - } - api, err := storage.NewService(context.Background(), option.WithHTTPClient(client)) - if err != nil { - return err - } - path := strings.TrimLeft(parseURL.Path, "/") - obj, err := api.Objects.Get(parseURL.Host, path).Do() - if err != nil { - return fmt.Errorf("%s: %s", err, fileURL) - } - fileURL = obj.MediaLink - return nil - } - if parseURL.Scheme == "gs" { - if err := util.Retry(5, 1*time.Second, resolveGS); err != nil { - return err - } - } - - if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil { - return err - } - - download := func() error { - return downloadFile(file, fileURL, client) - } - if err := util.Retry(5, 1*time.Second, download); err != nil { - return err - } - return nil -} - -func downloadFile(file, url string, client *http.Client) error { - if client == nil { - client = http.DefaultClient - } - - dst, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return err - } - defer dst.Close() - - pos, err := dst.Seek(0, os.SEEK_END) - if err != nil { - return err - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return err - } - - if pos != 0 { - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", pos)) - } - - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - var length 
int64 - switch resp.StatusCode { - case http.StatusOK: - if pos != 0 { - if _, err := dst.Seek(0, io.SeekStart); err != nil { - return err - } - if err := dst.Truncate(0); err != nil { - return err - } - pos = 0 - } - length = resp.ContentLength - case http.StatusPartialContent: - var end int64 - n, _ := fmt.Sscanf(resp.Header.Get("Content-Range"), - "bytes %d-%d/%d", &pos, &end, &length) - if n != 3 { - return fmt.Errorf("Bad Content-Range for %s", resp.Request.URL) - } - - if _, err := dst.Seek(pos, io.SeekStart); err != nil { - return err - } - plog.Infof("Resuming from byte %d", pos) - case http.StatusRequestedRangeNotSatisfiable: - plog.Infof("Download already complete") - return nil - default: - return fmt.Errorf("%s: %s", resp.Status, resp.Request.URL) - } - - prefix := filepath.Base(file) - if n, err := util.CopyProgress(capnslog.INFO, prefix, dst, resp.Body, resp.ContentLength); err != nil { - return err - } else if n != length-pos { - // unsure if this is worth caring about - plog.Infof("Downloaded %d bytes, expected %d", n, length-pos) - return nil - } else { - plog.Infof("Downloaded %d bytes", n) - return nil - } -} - -func DownloadSignedFile(file, url string, client *http.Client, verifyKeyFile string) error { - - if _, err := os.Stat(file + ".sig"); err == nil { - if e := VerifyFile(file, verifyKeyFile); e == nil { - plog.Infof("Verified existing file: %s", file) - return nil - } - } - - if err := DownloadFile(file, url, client); err != nil { - return err - } - - if err := DownloadFile(file+".sig", url+".sig", client); err != nil { - return err - } - - if err := VerifyFile(file, verifyKeyFile); err != nil { - return err - } - - plog.Infof("Verified file: %s", file) - return nil -} - -// false if both files do not exist -func cmpFileBytes(file1, file2 string) (bool, error) { - info1, err := os.Stat(file1) - if err != nil { - return false, err - } - info2, err := os.Stat(file2) - if err != nil { - return false, err - } - if info1.Size() != info2.Size() { 
- return false, nil - } - - f1, err := os.Open(file1) - if err != nil { - return false, err - } - defer f1.Close() - f2, err := os.Open(file2) - if err != nil { - return false, err - } - defer f2.Close() - - const defaultBufSize = 4096 // same as bufio - buf1 := make([]byte, defaultBufSize) - buf2 := make([]byte, defaultBufSize) - - for { - n1, err1 := io.ReadFull(f1, buf1) - n2, err2 := io.ReadFull(f2, buf2) - - if err1 == io.EOF && err2 == io.EOF { - return true, nil - } else if err1 == io.EOF || err2 == io.EOF { - return false, nil - } - - if err1 == io.ErrUnexpectedEOF && err2 == io.ErrUnexpectedEOF { - return bytes.Equal(buf1[:n1], buf2[:n2]), nil - } else if err1 == io.ErrUnexpectedEOF || err2 == io.ErrUnexpectedEOF { - return false, nil - } - - if err1 != nil { - return false, err1 - } - if err2 != nil { - return false, err2 - } - - if !bytes.Equal(buf1, buf2) { - return false, nil - } - } -} - -// UpdateFile downloads a file to temp dir and replaces the file only if -// contents have changed. If tempDir is "" default will be os.TempDir(). -// Leave client nil to use default. -func UpdateFile(file, url string, client *http.Client) error { - if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil { - return err - } - - tempFile := file + ".part" - if err := DownloadFile(tempFile, url, client); err != nil { - return fmt.Errorf("%s: %s", url, err) - } - defer os.Remove(tempFile) - - equal, err := cmpFileBytes(file, tempFile) - if os.IsExist(err) { // file may not exist, that is ok - return err - } - if equal { - plog.Infof("%v is up to date", file) - return nil - } - - // not equal so delete any existing file and rename tempFile to file - if err := os.Rename(tempFile, file); err != nil { - return err - } - return nil -} - -// UpdateSignedFile will download and replace the local file if the -// published signature doesn't match the local copy. Leave client nil to -// use default. 
-func UpdateSignedFile(file, url string, client *http.Client, verifyKeyFile string) error { - sigFile := file + ".sig" - sigURL := url + ".sig" - - // update local sig to latest - if err := UpdateFile(sigFile, sigURL, client); err != nil { - return err - } - - // try to verify with latest sig - if e := VerifyFile(file, verifyKeyFile); e == nil { - plog.Infof("Verified existing file: %s", file) - return nil - } - - // download image and try to verify again - if err := UpdateFile(file, url, client); err != nil { - return err - } - if err := VerifyFile(file, verifyKeyFile); err != nil { - return err - } - - plog.Infof("Verified file: %s", file) - return nil -} diff --git a/mantle/sdk/repo/manifest.go b/mantle/sdk/repo/manifest.go deleted file mode 100644 index 40805625..00000000 --- a/mantle/sdk/repo/manifest.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// Copyright 2008 The Android Open Source Project -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// repo is a limited implementation of the python repo git front end. -// -// Manifest Format -// -// A repo manifest describes the structure of a repo client; that is -// the directories that are visible and where they should be obtained -// from with git. -// -// The basic structure of a manifest is a bare Git repository holding -// a single 'default.xml' XML file in the top level directory. 
-// -// Manifests are inherently version controlled, since they are kept -// within a Git repository. Updates to manifests are automatically -// obtained by clients during `repo sync`. -// -// A manifest XML file (e.g. 'default.xml') roughly conforms to the -// following DTD. The python code is the only authoritative source. -// -// Local Manifests -// -// Additional remotes and projects may be added through local manifest -// files stored in `$TOP_DIR/.repo/local_manifests/*.xml`. -// -// For example: -// -// $ ls .repo/local_manifests -// local_manifest.xml -// another_local_manifest.xml -// -// $ cat .repo/local_manifests/local_manifest.xml -// -// -// -// -// -// -// Users may add projects to the local manifest(s) prior to a `repo sync` -// invocation, instructing repo to automatically download and manage -// these extra projects. -// -// Manifest files stored in `$TOP_DIR/.repo/local_manifests/*.xml` will -// be loaded in alphabetical order. -// -// Additional remotes and projects may also be added through a local -// manifest, stored in `$TOP_DIR/.repo/local_manifest.xml`. This method -// is deprecated in favor of using multiple manifest files as mentioned -// above. -// -// If `$TOP_DIR/.repo/local_manifest.xml` exists, it will be loaded before -// any manifest files stored in `$TOP_DIR/.repo/local_manifests/*.xml`. -package repo - -import ( - "encoding/xml" -) - -// Manifest is the root element of the file. 
-// -// -// -// -// -type Manifest struct { - XMLName xml.Name `xml:"manifest"` - Includes []Include `xml:"include"` - Notice string `xml:"notice"` - Remotes []Remote `xml:"remote"` - Default *Default `xml:"default"` - ManifestServer *ManifestServer `xml:"manifest-server"` - Projects []Project `xml:"project"` - ExtendProjects []ExtendProject `xml:"extend-project"` - RemoveProjects []RemoveProject `xml:"remove-project"` - RepoHooks *RepoHooks `xml:"repo-hooks"` -} - -// Remote -// -// -// -// -// -// -// -// -// One or more remote elements may be specified. Each remote element -// specifies a Git URL shared by one or more projects and (optionally) -// the Gerrit review server those projects upload changes through. -// -// Attribute `name`: A short name unique to this manifest file. The -// name specified here is used as the remote name in each project's -// .git/config, and is therefore automatically available to commands -// like `git fetch`, `git remote`, `git pull` and `git push`. -// -// Attribute `alias`: The alias, if specified, is used to override -// `name` to be set as the remote name in each project's .git/config. -// Its value can be duplicated while attribute `name` has to be unique -// in the manifest file. This helps each project to be able to have -// same remote name which actually points to different remote url. -// -// Attribute `fetch`: The Git URL prefix for all projects which use -// this remote. Each project's name is appended to this prefix to -// form the actual URL used to clone the project. -// -// Attribute `review`: Hostname of the Gerrit server where reviews -// are uploaded to by `repo upload`. This attribute is optional; -// if not specified then `repo upload` will not function. -// -// Attribute `revision`: Name of a Git branch (e.g. `master` or -// `refs/heads/master`). Remotes with their own revision will override -// the default revision. 
-// -type Remote struct { - Name string `xml:"name,attr"` - Alias string `xml:"alias,attr,omitempty"` - Fetch string `xml:"fetch,attr"` - Review string `xml:"review,attr,omitempty"` - Revision string `xml:"revision,attr,omitempty"` -} - -// Default -// -// -// -// -// -// -// -// -// -// At most one default element may be specified. Its remote and -// revision attributes are used when a project element does not -// specify its own remote or revision attribute. -// -// Attribute `remote`: Name of a previously defined remote element. -// Project elements lacking a remote attribute of their own will use -// this remote. -// -// Attribute `revision`: Name of a Git branch (e.g. `master` or -// `refs/heads/master`). Project elements lacking their own -// revision attribute will use this revision. -// -// Attribute `dest-branch`: Name of a Git branch (e.g. `master`). -// Project elements not setting their own `dest-branch` will inherit -// this value. If this value is not set, projects will use `revision` -// by default instead. -// -// Attribute `sync-j`: Number of parallel jobs to use when synching. -// -// Attribute `sync-c`: Set to true to only sync the given Git -// branch (specified in the `revision` attribute) rather than the -// whole ref space. Project elements lacking a sync-c element of -// their own will use this value. -// -// Attribute `sync-s`: Set to true to also sync sub-projects. -// -type Default struct { - Remote string `xml:"remote,attr,omitempty"` - Revision string `xml:"revision,attr,omitempty"` - DestBranch string `xml:"dest-branch,attr,omitempty"` - SyncJobs string `xml:"sync-j,attr,omitempty"` - SyncBranch string `xml:"sync-c,attr,omitempty"` - SyncSubProjects string `xml:"sync-s,attr,omitempty"` -} - -// ManifestServer -// -// -// -// -// At most one manifest-server may be specified. The url attribute -// is used to specify the URL of a manifest server, which is an -// XML RPC service. 
-// -// The manifest server should implement the following RPC methods: -// -// GetApprovedManifest(branch, target) -// -// Return a manifest in which each project is pegged to a known good revision -// for the current branch and target. -// -// The target to use is defined by environment variables TARGET_PRODUCT -// and TARGET_BUILD_VARIANT. These variables are used to create a string -// of the form $TARGET_PRODUCT-$TARGET_BUILD_VARIANT, e.g. passion-userdebug. -// If one of those variables or both are not present, the program will call -// GetApprovedManifest without the target parameter and the manifest server -// should choose a reasonable default target. -// -// GetManifest(tag) -// -// Return a manifest in which each project is pegged to the revision at -// the specified tag. -// -type ManifestServer struct { - URL string `xml:"url,attr"` -} - -// Project -// -// -// -// -// -// -// -// -// -// -// -// -// -// -// One or more project elements may be specified. Each element -// describes a single Git repository to be cloned into the repo -// client workspace. You may specify Git-submodules by creating a -// nested project. Git-submodules will be automatically -// recognized and inherit their parent's attributes, but those -// may be overridden by an explicitly specified project element. -// -// Attribute `name`: A unique name for this project. The project's -// name is appended onto its remote's fetch URL to generate the actual -// URL to configure the Git remote with. The URL gets formed as: -// -// ${remote_fetch}/${project_name}.git -// -// where ${remote_fetch} is the remote's fetch attribute and -// ${project_name} is the project's name attribute. The suffix ".git" -// is always appended as repo assumes the upstream is a forest of -// bare Git repositories. If the project has a parent element, its -// name will be prefixed by the parent's. -// -// The project name must match the name Gerrit knows, if Gerrit is -// being used for code reviews. 
-// -// Attribute `path`: An optional path relative to the top directory -// of the repo client where the Git working directory for this project -// should be placed. If not supplied the project name is used. -// If the project has a parent element, its path will be prefixed -// by the parent's. -// -// Attribute `remote`: Name of a previously defined remote element. -// If not supplied the remote given by the default element is used. -// -// Attribute `revision`: Name of the Git branch the manifest wants -// to track for this project. Names can be relative to refs/heads -// (e.g. just "master") or absolute (e.g. "refs/heads/master"). -// Tags and/or explicit SHA-1s should work in theory, but have not -// been extensively tested. If not supplied the revision given by -// the remote element is used if applicable, else the default -// element is used. -// -// Attribute `dest-branch`: Name of a Git branch (e.g. `master`). -// When using `repo upload`, changes will be submitted for code -// review on this branch. If unspecified both here and in the -// default element, `revision` is used instead. -// -// Attribute `groups`: List of groups to which this project belongs, -// whitespace or comma separated. All projects belong to the group -// "all", and each project automatically belongs to a group of -// its name:`name` and path:`path`. E.g. for -// , that project -// definition is implicitly in the following manifest groups: -// default, name:monkeys, and path:barrel-of. If you place a project in the -// group "notdefault", it will not be automatically downloaded by repo. -// If the project has a parent element, the `name` and `path` here -// are the prefixed ones. -// -// Attribute `sync-c`: Set to true to only sync the given Git -// branch (specified in the `revision` attribute) rather than the -// whole ref space. -// -// Attribute `sync-s`: Set to true to also sync sub-projects. -// -// Attribute `upstream`: Name of the Git ref in which a sha1 -// can be found. 
Used when syncing a revision locked manifest in -// -c mode to avoid having to sync the entire ref space. -// -// Attribute `clone-depth`: Set the depth to use when fetching this -// project. If specified, this value will override any value given -// to repo init with the --depth option on the command line. -// -// Attribute `force-path`: Set to true to force this project to create the -// local mirror repository according to its `path` attribute (if supplied) -// rather than the `name` attribute. This attribute only applies to the -// local mirrors syncing, it will be ignored when syncing the projects in a -// client working directory. -// -type Project struct { - Annotations []Annotation `xml:"annotation"` - SubProjects []Project `xml:"project"` - CopyFiles []CopyFile `xml:"copyfile"` - LinkFiles []LinkFile `xml:"linkfile"` - Name string `xml:"name,attr"` - Path string `xml:"path,attr,omitempty"` - Remote string `xml:"remote,attr,omitempty"` - Revision string `xml:"revision,attr,omitempty"` - DestBranch string `xml:"dest-branch,attr,omitempty"` - Groups string `xml:"groups,attr,omitempty"` - SyncBranch string `xml:"sync-c,attr,omitempty"` - SyncSubProjects string `xml:"sync-s,attr,omitempty"` - Upstream string `xml:"upstream,attr,omitempty"` - CloneDepth string `xml:"clone-depth,attr,omitempty"` - ForcePath string `xml:"force-path,attr,omitempty"` -} - -// ExtendProject -// -// -// -// -// -// -// Modify the attributes of the named project. -// -// This element is mostly useful in a local manifest file, to modify the -// attributes of an existing project without completely replacing the -// existing project definition. This makes the local manifest more robust -// against changes to the original manifest. -// -// Attribute `path`: If specified, limit the change to projects checked out -// at the specified path, rather than all projects with the given name. -// -// Attribute `groups`: List of additional groups to which this project -// belongs. 
Same syntax as the corresponding element of `project`. -// -type ExtendProject struct { - Name string `xml:"name,attr"` - Path string `xml:"path,attr,omitempty"` - Groups string `xml:"groups,attr,omitempty"` -} - -// Annotation -// -// -// -// -// -// -// Zero or more annotation elements may be specified as children of a -// project element. Each element describes a name-value pair that will be -// exported into each project's environment during a 'forall' command, -// prefixed with REPO__. In addition, there is an optional attribute -// "keep" which accepts the case insensitive values "true" (default) or -// "false". This attribute determines whether or not the annotation will -// be kept when exported with the manifest subcommand. -// -type Annotation struct { - Name string `xml:"name,attr"` - Value string `xml:"value,attr"` - Keep string `xml:"keep,attr,omitempty"` -} - -// CopyFile -// -// -// -// -// -// Zero or more copyfile elements may be specified as children of a -// project element. Each element describes a src-dest pair of files; -// the "src" file will be copied to the "dest" place during 'repo sync' -// command. -// "src" is project relative, "dest" is relative to the top of the tree. -// -type CopyFile struct { - Src string `xml:"src,attr"` - Dest string `xml:"dest,attr"` -} - -// LinkFile -// -// -// -// -// -// It's just like copyfile and runs at the same time as copyfile but -// instead of copying it creates a symlink. -// -type LinkFile struct { - Src string `xml:"src,attr"` - Dest string `xml:"dest,attr"` -} - -// RemoveProject -// -// -// -// -// Deletes the named project from the internal manifest table, possibly -// allowing a subsequent project element in the same manifest file to -// replace the project with a different source. -// -// This element is mostly useful in a local manifest file, where -// the user can remove a project, and possibly replace it with their -// own definition. 
-// -type RemoveProject struct { - Name string `xml:"name,attr"` -} - -// RepoHooks -// -// -// -// -// -type RepoHooks struct { - InProject string `xml:"in-project,attr"` - EnabledList string `xml:"enabled-list,attr"` -} - -// Include -// -// -// -// -// This element provides the capability of including another manifest -// file into the originating manifest. Normal rules apply for the -// target manifest to include - it must be a usable manifest on its own. -// -// Attribute `name`: the manifest to include, specified relative to -// the manifest repository's root. -// -type Include struct { - Name string `xml:"name,attr"` -} diff --git a/mantle/sdk/repo/manifest_test.go b/mantle/sdk/repo/manifest_test.go deleted file mode 100644 index c738ed20..00000000 --- a/mantle/sdk/repo/manifest_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package repo - -import ( - "encoding/xml" - "testing" - - "github.com/kylelemons/godebug/diff" -) - -// Test manifest document, this is mostly identical to the output of -// `repo manifest -r` except self-closing tags not used (Go doesn't -// output them) and attribute order is a bit different, Go uses struct -// order but Python alphabetizes. -const testManifest = ` - - Your sources have been synced successfully. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -` - -func TestMarshal(t *testing.T) { - var manifest Manifest - if err := xml.Unmarshal([]byte(testManifest), &manifest); err != nil { - t.Fatal(err) - } - - out, err := xml.MarshalIndent(&manifest, "", " ") - if err != nil { - t.Fatal(err) - } - - testResult := xml.Header + string(out) - if d := diff.Diff(testManifest, testResult); d != "" { - t.Fatalf("Unexpected XML:\n%s", d) - } -} diff --git a/mantle/sdk/verify.go b/mantle/sdk/verify.go deleted file mode 100644 index 8002a79b..00000000 --- a/mantle/sdk/verify.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sdk - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "golang.org/x/crypto/openpgp" - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/packet" -) - -const rpmgpgKeyring = "/etc/pki/rpm-gpg" - -func Verify(signed, signature io.Reader, verifyKeyFile string) error { - var err error - var keyring openpgp.EntityList - if verifyKeyFile == "" { - keyring, err = generateKeyRingFromDir(rpmgpgKeyring) - if err != nil { - return err - } - } else { - b, err := ioutil.ReadFile(verifyKeyFile) - if err != nil { - return fmt.Errorf("%v: %s", err, verifyKeyFile) - } - keyring, err = openpgp.ReadArmoredKeyRing(strings.NewReader(string(b[:]))) - if err != nil { - return err - } - } - - _, err = openpgp.CheckDetachedSignature(keyring, signed, signature) - return err -} - -func VerifyFile(file, verifyKeyFile string) error { - signed, err := os.Open(file) - if err != nil { - return err - } - defer signed.Close() - - signature, err := os.Open(file + ".sig") - if err != nil { - return err - } - defer signature.Close() - - return Verify(signed, signature, verifyKeyFile) -} - -func generateKeyRingFromDir(dir string) (openpgp.EntityList, error) { - var keyring openpgp.EntityList - - err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() { - return nil - } - f, err := os.OpenFile(path, os.O_RDONLY, 0) - if err != nil { - return err - } - defer f.Close() - - block, err := armor.Decode(f) - if err != nil { - return err - } - - if block.Type != openpgp.PublicKeyType { - return nil - } - - e, err := openpgp.ReadEntity(packet.NewReader(block.Body)) - if err != nil { - return err - } - - keyring = append(keyring, e) - return nil - }) - if err != nil { - return nil, err - } - - return keyring, nil -} diff --git a/mantle/storage/bucket.go b/mantle/storage/bucket.go deleted file mode 100644 index f9d634b3..00000000 --- a/mantle/storage/bucket.go +++ 
/dev/null @@ -1,388 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "errors" - "fmt" - "io" - "net/http" - "net/url" - "path" - "strings" - "sync" - - "golang.org/x/net/context" - "google.golang.org/api/googleapi" - "google.golang.org/api/storage/v1" -) - -var ( - UnknownScheme = errors.New("storage: URL missing gs:// scheme") - UnknownBucket = errors.New("storage: URL missing bucket name") -) - -type Bucket struct { - service *storage.Service - name string - prefix string - - mu sync.RWMutex - prefixes map[string]struct{} - objects map[string]*storage.Object - - // writeAlways enables overwriting of objects that appear up-to-date - writeAlways bool - // writeDryRun blocks any changes, merely logging them instead - writeDryRun bool -} - -func NewBucket(client *http.Client, bucketURL string) (*Bucket, error) { - service, err := storage.New(client) - if err != nil { - return nil, err - } - - parsedURL, err := url.Parse(bucketURL) - if err != nil { - return nil, err - } - if parsedURL.Scheme != "gs" { - return nil, UnknownScheme - } - if parsedURL.Host == "" { - return nil, UnknownBucket - } - - return &Bucket{ - service: service, - name: parsedURL.Host, - prefix: FixPrefix(parsedURL.Path), - prefixes: make(map[string]struct{}), - objects: make(map[string]*storage.Object), - }, nil -} - -func (b *Bucket) Name() string { - return b.name -} - -func (b *Bucket) Prefix() string { - return 
b.prefix -} - -func (b *Bucket) URL() *url.URL { - return &url.URL{Scheme: "gs", Host: b.name, Path: b.prefix} -} - -func (b *Bucket) WriteAlways(always bool) { - b.writeAlways = always -} - -func (b *Bucket) WriteDryRun(dryrun bool) { - b.writeDryRun = dryrun -} - -func (b *Bucket) Object(objName string) *storage.Object { - b.mu.RLock() - defer b.mu.RUnlock() - return b.objects[objName] -} - -func (b *Bucket) Objects() []*storage.Object { - b.mu.RLock() - defer b.mu.RUnlock() - objs := make([]*storage.Object, 0, len(b.objects)) - for _, obj := range b.objects { - objs = append(objs, obj) - } - return objs -} - -func (b *Bucket) Prefixes() []string { - seen := make(map[string]bool) - list := make([]string, 0) - add := func(prefix string) { - for !seen[prefix] { - seen[prefix] = true - list = append(list, prefix) - prefix = NextPrefix(prefix) - } - } - - b.mu.RLock() - defer b.mu.RUnlock() - for prefix := range b.prefixes { - add(prefix) - } - for objName := range b.objects { - add(NextPrefix(objName)) - } - - return list -} - -func (b *Bucket) Len() int { - b.mu.RLock() - defer b.mu.RUnlock() - return len(b.objects) -} - -func (b *Bucket) addObject(obj *storage.Object) { - if obj.Bucket != b.name { - panic(fmt.Errorf("adding gs://%s/%s to bucket %s", obj.Bucket, obj.Name, b.name)) - } - b.mu.Lock() - defer b.mu.Unlock() - b.objects[obj.Name] = obj -} - -func (b *Bucket) addObjects(objs *storage.Objects) { - b.mu.Lock() - defer b.mu.Unlock() - for _, obj := range objs.Items { - if obj.Bucket != b.name { - panic(fmt.Errorf("adding gs://%s/%s to bucket %s", obj.Bucket, obj.Name, b.name)) - } - b.objects[obj.Name] = obj - } - for _, pfx := range objs.Prefixes { - b.prefixes[pfx] = struct{}{} - } -} - -func (b *Bucket) delObject(objName string) { - b.mu.Lock() - defer b.mu.Unlock() - delete(b.objects, objName) -} - -func (b *Bucket) mkURL(obj interface{}) *url.URL { - switch v := obj.(type) { - case string: - u := b.URL() - u.Path = v - return u - case *storage.Object: 
- u := b.URL() - u.Path = v.Name - if v.Bucket != "" { - u.Host = v.Bucket - } - return u - case *url.URL: - return v - case nil: - return b.URL() - default: - panic(fmt.Errorf("unknown type %T", obj)) - } -} - -func (b *Bucket) apiErr(op string, obj interface{}, e error) error { - if _, ok := e.(*googleapi.Error); ok { - return &Error{Op: op, URL: b.mkURL(obj).String(), Err: e} - } - return e -} - -func (b *Bucket) Fetch(ctx context.Context) error { - return b.FetchPrefix(ctx, b.prefix, true) -} - -func (b *Bucket) FetchPrefix(ctx context.Context, prefix string, recursive bool) error { - prefix = FixPrefix(prefix) - req := b.service.Objects.List(b.name) - if prefix != "" { - req.Prefix(prefix) - } - if !recursive { - req.Delimiter("/") - } - - n := 0 - p := 0 - u := b.URL() - u.Path = prefix - add := func(objs *storage.Objects) error { - b.addObjects(objs) - n += len(objs.Items) - plog.Infof("Found %d objects under %s", n, u) - if len(objs.Prefixes) > 0 { - p += len(objs.Prefixes) - plog.Infof("Found %d directories under %s", p, u) - } - return nil - } - - plog.Noticef("Fetching %s", u) - - if err := req.Pages(ctx, add); err != nil { - return b.apiErr("storage.objects.list", nil, err) - } - - if prefix == "" { - return nil - } - - // In order to pair well with HTML indexing we need to check for - // a redirect object (prefix minus trailing slash). The list - // request needs the slash get foo/bar/* but not foo/barbaz. 
- redirName := strings.TrimSuffix(prefix, "/") - if b.Object(redirName) != nil { - return nil - } - - redirReq := b.service.Objects.Get(b.name, redirName) - redirReq.Context(ctx) - redirObj, err := redirReq.Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == 404 { - return nil // missing is perfectly valid - } else if err != nil { - return b.apiErr("storage.objects.get", redirName, err) - } - - b.addObject(redirObj) - return nil -} - -func (b *Bucket) Upload(ctx context.Context, obj *storage.Object, media io.ReaderAt) error { - // Calculate the checksum to enable upload integrity checking. - if obj.Crc32c == "" { - obj = dupObj(obj) // avoid editing the original - if err := crcSum(obj, media); err != nil { - return err - } - } - - old := b.Object(obj.Name) - if !b.writeAlways && crcEq(old, obj) { - return nil // up to date! - } - if b.writeDryRun { - plog.Noticef("Would write %s", b.mkURL(obj)) - return nil - } - - req := b.service.Objects.Insert(b.name, obj) - // ResumableMedia is documented as deprecated in favor of Media - // but Media's retry support was bad and got temporarily removed. - // https://github.com/google/google-api-go-client/commit/9737cc9e103c00d06a8f3993361dec083df3d252 - req.ResumableMedia(ctx, media, int64(obj.Size), obj.ContentType) - - // Watch out for unexpected conflicting updates. - if old != nil { - req.IfGenerationMatch(old.Generation) - } - - plog.Noticef("Writing %s", b.mkURL(obj)) - - inserted, err := req.Do() - if err != nil { - return b.apiErr("storage.objects.insert", obj, err) - } - - b.addObject(inserted) - return nil -} - -func (b *Bucket) Copy(ctx context.Context, src *storage.Object, dstName string) error { - if src.Bucket == "" { - panic(fmt.Errorf("src.Bucket is blank: %#v", src)) - } - - old := b.Object(dstName) - if !b.writeAlways && crcEq(old, src) { - return nil // up to date! 
- } - - // It does work to pass src directly to the Rewrite API call, the - // name and bucket values don't really matter, they just cannot be - // blank for whatever reason. We make a copy just to get consistent - // results, e.g. always use the destination bucket's default ACL. - dst := dupObj(src) - dst.Name = dstName - dst.Bucket = b.name - - if b.writeDryRun { - plog.Noticef("Would copy %s to %s", b.mkURL(src), b.mkURL(dst)) - return nil - } - - req := b.service.Objects.Rewrite( - src.Bucket, src.Name, dst.Bucket, dst.Name, src) - req.Context(ctx) - - // Watch out for unexpected conflicting updates. - if old != nil { - req.IfGenerationMatch(old.Generation) - } - if src.Generation != 0 { - req.IfSourceGenerationMatch(src.Generation) - } - - plog.Noticef("Copying %s to %s", b.mkURL(src), b.mkURL(dst)) - - for { - resp, err := req.Do() - if err != nil { - return b.apiErr("storage.objects.rewrite", dst, err) - } - if resp.Done { - b.addObject(resp.Resource) - return nil - } - req.RewriteToken(resp.RewriteToken) - } -} - -func (b *Bucket) Delete(ctx context.Context, objName string) error { - if b.writeDryRun { - plog.Noticef("Would delete %s", b.mkURL(objName)) - return nil - } - - req := b.service.Objects.Delete(b.name, objName) - req.Context(ctx) - - // Watch out for unexpected conflicting updates. - if old := b.Object(objName); old != nil { - req.IfGenerationMatch(old.Generation) - req.IfMetagenerationMatch(old.Metageneration) - } - - plog.Noticef("Deleting %s", b.mkURL(objName)) - - if err := req.Do(); err != nil { - return b.apiErr("storage.objects.delete", objName, err) - } - - b.delObject(objName) - return nil -} - -// FixPrefix ensures non-empty paths end in a slash but never start with one. -func FixPrefix(p string) string { - if p != "" && !strings.HasSuffix(p, "/") { - p += "/" - } - return strings.TrimPrefix(p, "/") -} - -// NextPrefix chops off the final component of an object name or prefix. 
-func NextPrefix(name string) string { - prefix, _ := path.Split(strings.TrimSuffix(name, "/")) - return prefix -} diff --git a/mantle/storage/bucket_test.go b/mantle/storage/bucket_test.go deleted file mode 100644 index f96f5159..00000000 --- a/mantle/storage/bucket_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "fmt" - "net/http" - "testing" - - "google.golang.org/api/storage/v1" -) - -type fakeTransport struct{} - -func (f fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { - return nil, fmt.Errorf("FAKE! 
%s %s", req.Method, req.URL) -} - -func FakeBucket(bucketURL string) (*Bucket, error) { - return NewBucket(&http.Client{Transport: fakeTransport{}}, bucketURL) -} - -func (b *Bucket) AddObject(obj *storage.Object) { - b.addObject(obj) -} - -func TestBucketURL(t *testing.T) { - if _, err := FakeBucket("http://bucket/"); err != UnknownScheme { - t.Errorf("Unexpected error: %v", err) - } - - if _, err := FakeBucket("gs:///"); err != UnknownBucket { - t.Errorf("Unexpected error: %v", err) - } - - for _, test := range []struct { - url string - name string - prefix string - }{ - {"gs://bucket", "bucket", ""}, - {"gs://bucket/", "bucket", ""}, - {"gs://bucket/prefix", "bucket", "prefix/"}, - {"gs://bucket/prefix/", "bucket", "prefix/"}, - {"gs://bucket/prefix/foo", "bucket", "prefix/foo/"}, - {"gs://bucket/prefix/foo/", "bucket", "prefix/foo/"}, - } { - - bkt, err := FakeBucket(test.url) - if err != nil { - t.Errorf("Unexpected error for url %q: %v", test.url, err) - continue - } - - if bkt.Name() != test.name { - t.Errorf("Unexpected name for url %q: %q", test.url, bkt.Name()) - } - if bkt.Prefix() != test.prefix { - t.Errorf("Unexpected name for url %q: %q", test.url, bkt.Prefix()) - } - } - -} - -func ExampleNextPrefix() { - fmt.Println(NextPrefix("foo/bar/baz")) - fmt.Println(NextPrefix("foo/bar/")) - fmt.Println(NextPrefix("foo/bar")) - fmt.Println(NextPrefix("foo/")) - fmt.Println(NextPrefix("foo")) - fmt.Println(NextPrefix("")) - // Output: - // foo/bar/ - // foo/ - // foo/ - // - // -} diff --git a/mantle/storage/index/indexer.go b/mantle/storage/index/indexer.go deleted file mode 100644 index fc7a1023..00000000 --- a/mantle/storage/index/indexer.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -import ( - "bytes" - "html/template" - "net/url" - "path" - "strings" - - "golang.org/x/net/context" - gs "google.golang.org/api/storage/v1" - - "github.com/coreos/mantle/storage" -) - -var ( - indexTemplate *template.Template -) - -const ( - indexText = ` - - {{.Title}} - - - -

{{.Title}}

- {{range .SubDirs}} - [dir] {{.|base}}
- {{end}} - {{range .Objects}} - [file] {{.Name|base}}
- {{end}} - - -` -) - -func init() { - indexTemplate = template.New("index") - indexTemplate.Funcs(template.FuncMap{"base": path.Base}) - template.Must(indexTemplate.Parse(indexText)) -} - -type Indexer struct { - bucket *storage.Bucket - prefix string - empty bool - Title string - SubDirs []string - Objects []*gs.Object -} - -func (t *IndexTree) Indexer(name, prefix string) *Indexer { - return &Indexer{ - bucket: t.bucket, - prefix: prefix, - empty: !t.prefixes[prefix], - Title: name + "/" + prefix, - SubDirs: t.subdirs[prefix], - Objects: t.objects[prefix], - } -} - -func (i *Indexer) Empty() bool { - return i.empty -} - -func (i *Indexer) maybeDelete(ctx context.Context, name string) error { - if name == "" || i.bucket.Object(name) == nil { - return nil - } - return i.bucket.Delete(ctx, name) -} - -func (i *Indexer) DeleteRedirect(ctx context.Context) error { - return i.maybeDelete(ctx, strings.TrimSuffix(i.prefix, "/")) -} - -func (i *Indexer) DeleteDirectory(ctx context.Context) error { - return i.maybeDelete(ctx, i.prefix) -} - -func (i *Indexer) DeleteIndexHTML(ctx context.Context) error { - return i.maybeDelete(ctx, i.prefix+"index.html") -} - -func (i *Indexer) UpdateRedirect(ctx context.Context) error { - if i.prefix == "" { - return nil - } - - name := strings.TrimSuffix(i.prefix, "/") - obj := gs.Object{ - Name: name, - ContentType: "text/html", - CacheControl: "public, max-age=60", - } - - link := escapePath(path.Base(name)) - buf := bytes.NewBuffer(make([]byte, 0, 256)) - buf.WriteString("\n") - // TODO: include - // I suspect that's only meaningful if we switch to absolute paths - buf.WriteString(`\n\n") - - return i.bucket.Upload(ctx, &obj, bytes.NewReader(buf.Bytes())) -} - -func (i *Indexer) updateHTML(ctx context.Context, suffix string) error { - obj := gs.Object{ - Name: i.prefix + suffix, - ContentType: "text/html", - CacheControl: "public, max-age=60", - } - - buf := bytes.Buffer{} - if err := indexTemplate.Execute(&buf, i); err != nil { - 
return err - } - - return i.bucket.Upload(ctx, &obj, bytes.NewReader(buf.Bytes())) -} - -func (i *Indexer) UpdateDirectoryHTML(ctx context.Context) error { - if i.prefix == "" { - return nil - } - - return i.updateHTML(ctx, "") -} - -func (i *Indexer) UpdateIndexHTML(ctx context.Context) error { - return i.updateHTML(ctx, "index.html") -} - -func escapePath(path string) string { - u := url.URL{Path: path} - return u.EscapedPath() -} diff --git a/mantle/storage/index/job.go b/mantle/storage/index/job.go deleted file mode 100644 index 5a38b33c..00000000 --- a/mantle/storage/index/job.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -import ( - "golang.org/x/net/context" - - "github.com/coreos/mantle/lang/worker" - "github.com/coreos/mantle/storage" -) - -type IndexJob struct { - Bucket *storage.Bucket - - name *string - prefix *string - enableDirectoryHTML bool - enableIndexHTML bool - enableDelete bool - notRecursive bool // inverted because recursive is default -} - -func NewIndexJob(bucket *storage.Bucket) *IndexJob { - return &IndexJob{Bucket: bucket} -} - -// Name overrides Bucket's name in page titles. -func (ij *IndexJob) Name(name string) { - ij.name = &name -} - -// Prefix overrides Bucket's default prefix. -func (ij *IndexJob) Prefix(p string) { - p = storage.FixPrefix(p) - ij.prefix = &p -} - -// DirectoryHTML toggles generation of HTML pages to mimic directories. 
-func (ij *IndexJob) DirectoryHTML(enable bool) { - ij.enableDirectoryHTML = enable -} - -// IndexHTML toggles generation of index.html pages for each directory. -func (ij *IndexJob) IndexHTML(enable bool) { - ij.enableIndexHTML = enable -} - -// Delete toggles deletion of stale indexes for now empty directories. -func (ij *IndexJob) Delete(enable bool) { - ij.enableDelete = enable -} - -// Recursive toggles generation of indexes for subdirectories (the default). -func (sj *IndexJob) Recursive(enable bool) { - sj.notRecursive = !enable -} - -func (ij *IndexJob) doDir(wg *worker.WorkerGroup, ix *Indexer) error { - if ij.enableDirectoryHTML && !ix.Empty() { - if err := wg.Start(ix.UpdateRedirect); err != nil { - return err - } - if err := wg.Start(ix.UpdateDirectoryHTML); err != nil { - return err - } - } else if ij.enableDelete { - if err := wg.Start(ix.DeleteRedirect); err != nil { - return err - } - if err := wg.Start(ix.DeleteDirectory); err != nil { - return err - } - } - - if ij.enableIndexHTML && !ix.Empty() { - if err := wg.Start(ix.UpdateIndexHTML); err != nil { - return err - } - } else if ij.enableDelete { - if err := wg.Start(ix.DeleteIndexHTML); err != nil { - return err - } - } - - return nil -} - -func (ij *IndexJob) Do(ctx context.Context) error { - if ij.name == nil { - name := ij.Bucket.Name() - ij.name = &name - } - if ij.prefix == nil { - prefix := ij.Bucket.Prefix() - ij.prefix = &prefix - } - - tree := NewIndexTree(ij.Bucket, ij.notRecursive) - wg := worker.NewWorkerGroup(ctx, storage.MaxConcurrentRequests) - - if ij.notRecursive { - ix := tree.Indexer(*ij.name, *ij.prefix) - return wg.WaitError(ij.doDir(wg, ix)) - } - - for _, prefix := range tree.Prefixes(*ij.prefix) { - ix := tree.Indexer(*ij.name, prefix) - if err := ij.doDir(wg, ix); err != nil { - return wg.WaitError(err) - } - } - - return wg.Wait() -} diff --git a/mantle/storage/index/sync.go b/mantle/storage/index/sync.go deleted file mode 100644 index 1176969c..00000000 --- 
a/mantle/storage/index/sync.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package index - -import ( - "golang.org/x/net/context" - - gs "google.golang.org/api/storage/v1" - - "github.com/coreos/mantle/storage" -) - -type SyncIndexJob struct { - storage.SyncJob - IndexJob - - srcIndexes IndexSet - dstIndexes IndexSet -} - -func NewSyncIndexJob(src, dst *storage.Bucket) *SyncIndexJob { - si := &SyncIndexJob{ - SyncJob: storage.SyncJob{ - Source: src, - Destination: dst, - }, - IndexJob: IndexJob{ - Bucket: dst, - }, - srcIndexes: NewIndexSet(src), - dstIndexes: NewIndexSet(dst), - } - si.SyncJob.SourceFilter(si.srcIndexes.NotIndex) - si.SyncJob.DeleteFilter(si.dstIndexes.NotIndex) - return si -} - -// DestinationPrefix overrides the Destination bucket's default prefix. -func (si *SyncIndexJob) DestinationPrefix(p string) { - si.SyncJob.DestinationPrefix(p) - si.IndexJob.Prefix(p) -} - -// Prefix is an alias for DestinationPrefix() -func (si *SyncIndexJob) Prefix(p string) { - si.DestinationPrefix(p) -} - -// SourceFilter selects which objects to copy from Source. -func (si *SyncIndexJob) SourceFilter(f storage.Filter) { - si.SyncJob.SourceFilter(func(obj *gs.Object) bool { - return f(obj) && si.srcIndexes.NotIndex(obj) - }) -} - -// DeleteFilter selects which objects may be pruned from Destination. 
-func (si *SyncIndexJob) DeleteFilter(f storage.Filter) { - si.SyncJob.DeleteFilter(func(obj *gs.Object) bool { - return f(obj) && si.dstIndexes.NotIndex(obj) - }) -} - -// Delete enables deletion of extra objects and indexes from Destination. -func (si *SyncIndexJob) Delete(enable bool) { - si.SyncJob.Delete(enable) - si.IndexJob.Delete(enable) -} - -// Recursive toggles copying/indexing subdirectories (the default). -func (si *SyncIndexJob) Recursive(enable bool) { - si.SyncJob.Recursive(enable) - si.IndexJob.Recursive(enable) -} - -func (sj *SyncIndexJob) Do(ctx context.Context) error { - if err := sj.SyncJob.Do(ctx); err != nil { - return err - } - return sj.IndexJob.Do(ctx) -} diff --git a/mantle/storage/index/tree.go b/mantle/storage/index/tree.go deleted file mode 100644 index 56df8c62..00000000 --- a/mantle/storage/index/tree.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package index - -import ( - "strings" - - gs "google.golang.org/api/storage/v1" - - "github.com/coreos/mantle/lang/natsort" - "github.com/coreos/mantle/storage" -) - -type IndexTree struct { - bucket *storage.Bucket - prefixes map[string]bool - subdirs map[string][]string - objects map[string][]*gs.Object -} - -func NewIndexTree(bucket *storage.Bucket, includeEmpty bool) *IndexTree { - t := &IndexTree{ - bucket: bucket, - prefixes: make(map[string]bool), - subdirs: make(map[string][]string), - objects: make(map[string][]*gs.Object), - } - - for _, prefix := range bucket.Prefixes() { - if includeEmpty { - t.addDir(prefix) - } else { - t.prefixes[prefix] = false // initialize as empty - } - } - - indexes := NewIndexSet(bucket) - for _, obj := range bucket.Objects() { - if indexes.NotIndex(obj) { - t.addObj(obj) - } - } - - for _, dirs := range t.subdirs { - natsort.Strings(dirs) - } - - for _, objs := range t.objects { - storage.SortObjects(objs) - } - - return t -} - -func (t *IndexTree) addObj(obj *gs.Object) { - prefix := storage.NextPrefix(obj.Name) - t.objects[prefix] = append(t.objects[prefix], obj) - t.addDir(prefix) -} - -func (t *IndexTree) addDir(prefix string) { - for !t.prefixes[prefix] { - t.prefixes[prefix] = true // mark as not empty - if prefix == "" { - return - } - parent := storage.NextPrefix(prefix) - t.subdirs[parent] = append(t.subdirs[parent], prefix) - prefix = storage.NextPrefix(prefix) - } -} - -func (t *IndexTree) Prefixes(dir string) []string { - prefixes := make([]string, 0, len(t.prefixes)) - for prefix := range t.prefixes { - if strings.HasPrefix(prefix, dir) { - prefixes = append(prefixes, prefix) - } - } - return prefixes -} diff --git a/mantle/storage/object.go b/mantle/storage/object.go deleted file mode 100644 index c1f8ffba..00000000 --- a/mantle/storage/object.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2016 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "encoding/base64" - "hash/crc32" - "io" - "sort" - - "google.golang.org/api/storage/v1" - - "github.com/coreos/mantle/lang/natsort" - "github.com/coreos/mantle/lang/reader" -) - -// SortObjects orders Objects by Name using natural sorting. -func SortObjects(objs []*storage.Object) { - sort.Slice(objs, func(i, j int) bool { - return natsort.Less(objs[i].Name, objs[j].Name) - }) -} - -// Update CRC32c and Size in the given Object -func crcSum(obj *storage.Object, media io.ReaderAt) error { - c := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - n, err := io.Copy(c, reader.AtReader(media)) - if err != nil { - return err - } - obj.Size = uint64(n) - obj.Crc32c = base64.StdEncoding.EncodeToString(c.Sum(nil)) - return nil -} - -// Judges whether two Objects are equal based on size and CRC. To guard against -// uninitialized fields, nil objects and empty CRC values are never equal. -func crcEq(a, b *storage.Object) bool { - if a == nil || b == nil { - return false - } - if a.Crc32c == "" || b.Crc32c == "" { - return false - } - return a.Size == b.Size && a.Crc32c == b.Crc32c -} - -// Duplicate basic Object metadata, useful for preparing a copy operation. 
-func dupObj(src *storage.Object) *storage.Object { - dst := &storage.Object{ - Bucket: src.Bucket, - CacheControl: src.CacheControl, - ContentDisposition: src.ContentDisposition, - ContentEncoding: src.ContentEncoding, - ContentLanguage: src.ContentLanguage, - ContentType: src.ContentType, - Crc32c: src.Crc32c, - Md5Hash: src.Md5Hash, - Name: src.Name, - Size: src.Size, - } - if len(src.Metadata) > 0 { - dst.Metadata = make(map[string]string) - for k, v := range src.Metadata { - dst.Metadata[k] = v - } - } - return dst -} diff --git a/mantle/storage/object_test.go b/mantle/storage/object_test.go deleted file mode 100644 index 899df1fa..00000000 --- a/mantle/storage/object_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "strings" - "testing" - - "google.golang.org/api/storage/v1" -) - -const ( - testPage = ` - - -` - testPageCRC = "xH9jaw==" - testPageMD5 = "2a6rirkVBEsl0bzTOzNtzA==" - testPageSize = 83 -) - -func TestSortObjects(t *testing.T) { - slice := []*storage.Object{ - &storage.Object{Name: "a2"}, - &storage.Object{Name: "a10"}, - &storage.Object{Name: "a1"}, - } - SortObjects(slice) - if slice[0].Name != "a1" || - slice[1].Name != "a2" || - slice[2].Name != "a10" { - t.Errorf("Undexpected order: %#v", slice) - } -} - -func TestCRCSum(t *testing.T) { - obj := storage.Object{} - if err := crcSum(&obj, strings.NewReader(testPage)); err != nil { - t.Fatal(err) - } - if obj.Crc32c != testPageCRC { - t.Errorf("Bad CRC32c: %q != %q", obj.Crc32c, testPageCRC) - } - if obj.Size != testPageSize { - t.Errorf("Bad Size: %d != %d", obj.Size, testPageSize) - } -} - -func TestCRCEq(t *testing.T) { - obj := storage.Object{Crc32c: testPageCRC, Size: testPageSize} - if crcEq(&obj, nil) { - t.Errorf("%#v equal to nil", obj) - } - if crcEq(nil, &obj) { - t.Errorf("nil equal to %#v", obj) - } - if crcEq(nil, nil) { - t.Error("nil not equal to nil") - } - if crcEq(&obj, &storage.Object{Crc32c: testPageCRC}) { - t.Errorf("%#v equal ignored size", obj) - } - if crcEq(&obj, &storage.Object{Size: testPageSize}) { - t.Errorf("%#v equal ignored blank CRC", obj) - } - if !crcEq(&obj, &obj) { - t.Errorf("%#v not equal to itself", obj) - } -} - -func TestCRCSumAndEq(t *testing.T) { - var a, b storage.Object - r := strings.NewReader(testPage) // reading twice should work - if err := crcSum(&a, r); err != nil { - t.Fatal(err) - } - r.Seek(0, 0) - if err := crcSum(&b, r); err != nil { - t.Fatal(err) - } - if !crcEq(&a, &b) { - t.Errorf("%#v not equal to %#v", a, b) - } - c := storage.Object{Crc32c: testPageCRC, Size: testPageSize} - if !crcEq(&a, &c) { - t.Errorf("%#v not equal to %#v", a, c) - } -} diff --git a/mantle/storage/storage.go b/mantle/storage/storage.go deleted 
file mode 100644 index 64e72624..00000000 --- a/mantle/storage/storage.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// storage provides a high level interface for Google Cloud Storage -package storage - -import ( - "github.com/coreos/pkg/capnslog" -) - -// Arbitrary limit on the number of concurrent remote API requests. -const MaxConcurrentRequests = 12 - -var plog = capnslog.NewPackageLogger("github.com/coreos/mantle", "storage") diff --git a/mantle/storage/sync.go b/mantle/storage/sync.go deleted file mode 100644 index 472e9c7d..00000000 --- a/mantle/storage/sync.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package storage - -import ( - "strings" - - "golang.org/x/net/context" - gs "google.golang.org/api/storage/v1" - - "github.com/coreos/mantle/lang/worker" -) - -// Filter is a type of function that returns true if an object should be -// included in a given operation or false if it should be excluded/ignored. -type Filter func(*gs.Object) bool - -type SyncJob struct { - Source *Bucket - Destination *Bucket - - sourcePrefix *string - destinationPrefix *string - sourceFilter Filter - deleteFilter Filter - enableDelete bool - notRecursive bool // inverted because recursive is default -} - -func Sync(ctx context.Context, src, dst *Bucket) error { - job := SyncJob{Source: src, Destination: dst} - return job.Do(ctx) -} - -// SourcePrefix overrides the Source bucket's default prefix. -func (sj *SyncJob) SourcePrefix(p string) { - p = FixPrefix(p) - sj.sourcePrefix = &p -} - -// DestinationPrefix overrides the Destination bucket's default prefix. -func (sj *SyncJob) DestinationPrefix(p string) { - p = FixPrefix(p) - sj.destinationPrefix = &p -} - -// SourceFilter selects which objects to copy from Source. -func (sj *SyncJob) SourceFilter(f Filter) { - sj.sourceFilter = f -} - -// DeleteFilter selects which objects may be pruned from Destination. -func (sj *SyncJob) DeleteFilter(f Filter) { - sj.deleteFilter = f -} - -// Delete toggles deletion of extra objects from Destination. -func (sj *SyncJob) Delete(enable bool) { - sj.enableDelete = enable -} - -// Recursive toggles copying subdirectories from Source (the default). -func (sj *SyncJob) Recursive(enable bool) { - sj.notRecursive = !enable -} - -func (sj *SyncJob) Do(ctx context.Context) error { - if sj.sourcePrefix == nil { - prefix := sj.Source.Prefix() - sj.sourcePrefix = &prefix - } - if sj.destinationPrefix == nil { - prefix := sj.Destination.Prefix() - sj.destinationPrefix = &prefix - } - - // Assemble a set of existing objects which may be deleted. 
- oldNames := make(map[string]struct{}) - for _, oldObj := range sj.Destination.Objects() { - if !sj.hasPrefix(oldObj.Name, *sj.destinationPrefix) { - continue - } - if sj.deleteFilter != nil && !sj.deleteFilter(oldObj) { - continue - } - oldNames[oldObj.Name] = struct{}{} - } - - wg := worker.NewWorkerGroup(ctx, MaxConcurrentRequests) - for _, srcObj := range sj.Source.Objects() { - if !sj.hasPrefix(srcObj.Name, *sj.sourcePrefix) { - continue - } - if sj.sourceFilter != nil && !sj.sourceFilter(srcObj) { - continue - } - - obj := srcObj // for the sake of the closure - name := sj.newName(srcObj) - - worker := func(c context.Context) error { - return sj.Destination.Copy(c, obj, name) - } - if err := wg.Start(worker); err != nil { - return wg.WaitError(err) - } - - // Drop from set of deletion candidates. - delete(oldNames, name) - } - - for oldName := range oldNames { - name := oldName // for the sake of the closure - worker := func(c context.Context) error { - return sj.Destination.Delete(c, name) - } - if err := wg.Start(worker); err != nil { - return wg.WaitError(err) - } - } - - return wg.Wait() -} - -func (sj *SyncJob) hasPrefix(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if sj.notRecursive { - suffix := name[len(prefix):] - if strings.Contains(suffix, "/") { - return false - } - } - return true -} - -func (sj *SyncJob) newName(srcObj *gs.Object) string { - return *sj.destinationPrefix + srcObj.Name[len(*sj.sourcePrefix):] -} diff --git a/mantle/system/anonfile_linux.go b/mantle/system/anonfile_linux.go deleted file mode 100644 index a4c841b1..00000000 --- a/mantle/system/anonfile_linux.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - - "golang.org/x/sys/unix" -) - -// LinkFile creates a new link to an open File instead of an existing -// name as os.Link and friends do. Particularly useful for making a file -// created by AnonymousFile accessible in the filesystem. As with Link the -// caller should ensure the new name is on the same filesystem. -func LinkFile(file *os.File, name string) error { - // The AT_EMPTY_PATH version needs CAP_DAC_READ_SEARCH but using - // /proc and AT_SYMLINK_FOLLOW does not and is the "normal" way. - //Linkat(int(a.Fd()), "", AT_FDCWD, name, AT_EMPTY_PATH) - err := unix.Linkat( - unix.AT_FDCWD, fmt.Sprintf("/proc/self/fd/%d", file.Fd()), - unix.AT_FDCWD, name, unix.AT_SYMLINK_FOLLOW) - if err != nil { - return &os.LinkError{ - Op: "linkat", - Old: file.Name(), - New: name, - Err: err, - } - } - return nil -} - -// AnonymousFile creates an unlinked temporary file in the given directory -// or the default temporary directory if unspecified. Since the file has no -// name, the file's Name method does not return a real path. The file may -// be later linked into the filesystem for safe keeping using LinkFile. -func AnonymousFile(dir string) (*os.File, error) { - return tmpFile(dir, false) -} - -// PrivateFile creates an unlinked temporary file in the given directory -// or the default temporary directory if unspecified. Unlike AnonymousFile, -// the opened file cannot be linked into the filesystem later. 
-func PrivateFile(dir string) (*os.File, error) { - return tmpFile(dir, true) -} - -func tmpFile(dir string, private bool) (*os.File, error) { - if dir == "" { - dir = os.TempDir() - } - - flags := unix.O_RDWR | unix.O_TMPFILE | unix.O_CLOEXEC - if private { - flags |= unix.O_EXCL - } - - tmpPath := filepath.Join(dir, "(unlinked)") - tmpFd, err := unix.Open(dir, flags, 0600) - if err != nil { - return nil, &os.PathError{ - Op: "openat", - Path: tmpPath, - Err: err, - } - } - - return os.NewFile(uintptr(tmpFd), tmpPath), nil -} - -// IsOpNotSupported reports true if the underlying error was EOPNOTSUPP. -// Useful for checking if the host or filesystem lacks O_TMPFILE support. -func IsOpNotSupported(err error) bool { - if oserr, ok := err.(*os.PathError); ok { - if errno, ok := oserr.Err.(syscall.Errno); ok { - if errno == syscall.EOPNOTSUPP { - return true - } - } - } - return false -} diff --git a/mantle/system/anonfile_linux_test.go b/mantle/system/anonfile_linux_test.go deleted file mode 100644 index 18dc0b25..00000000 --- a/mantle/system/anonfile_linux_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -func TestAnonymousFile(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - anon, err := AnonymousFile(tmp) - if IsOpNotSupported(err) { - t.Skip("O_TMPFILE not supported") - } else if err != nil { - t.Fatal(err) - } - defer anon.Close() - - info, err := ioutil.ReadDir(tmp) - if err != nil { - t.Fatal(err) - } - if len(info) != 0 { - t.Errorf("%s not empty: %v", tmp, info) - } - - name := filepath.Join(tmp, "name") - if err := LinkFile(anon, name); err != nil { - t.Errorf("Link failed: %v", err) - } - - info, err = ioutil.ReadDir(tmp) - if err != nil { - t.Fatal(err) - } - if len(info) != 1 || info[0].Name() != "name" { - t.Errorf("%s has unexpected contents: %v", tmp, info) - } -} - -func TestLinkFile(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - orig, err := ioutil.TempFile(tmp, "") - if err != nil { - t.Fatal(err) - } - defer orig.Close() - - info, err := ioutil.ReadDir(tmp) - if err != nil { - t.Fatal(err) - } - if len(info) != 1 || info[0].Name() != filepath.Base(orig.Name()) { - t.Fatalf("%s has unexpected contents: %v", tmp, info) - } - - // LinkFile while orig still exists should work - if err := LinkFile(orig, filepath.Join(tmp, "name1")); err != nil { - t.Errorf("Link failed: %v", err) - } - - if err := os.Remove(orig.Name()); err != nil { - t.Fatal(err) - } - - // name1 is keeping orig alive so this still works - if err := LinkFile(orig, filepath.Join(tmp, "name2")); err != nil { - t.Errorf("Link failed: %v", err) - } - - if err := os.Remove(filepath.Join(tmp, "name1")); err != nil { - t.Fatal(err) - } - if err := os.Remove(filepath.Join(tmp, "name2")); err != nil { - t.Fatal(err) - } - - // LinkFile after orig is removed doesn't work which is a - // difference between how normal files and O_TMPFILE works. 
- if err := LinkFile(orig, filepath.Join(tmp, "name3")); err == nil { - t.Error("Linking to removed file unexpectedly worked!") - } -} - -func TestPrivateFile(t *testing.T) { - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - priv, err := PrivateFile(tmp) - if err != nil { - // Travis is an unfun stick in the mud and gives us - // an ancient system lacking O_TMPFILE support. - if oserr, ok := err.(*os.PathError); ok { - if errno, ok := oserr.Err.(syscall.Errno); ok { - if errno == syscall.EOPNOTSUPP { - t.Skip("O_TMPFILE not supported") - } - } - } - t.Fatal(err) - } - defer priv.Close() - - info, err := ioutil.ReadDir(tmp) - if err != nil { - t.Fatal(err) - } - if len(info) != 0 { - t.Errorf("%s not empty: %v", tmp, info) - } - - if err := LinkFile(priv, filepath.Join(tmp, "name")); err == nil { - t.Error("Linking to private file unexpectedly worked!") - } -} diff --git a/mantle/system/copy.go b/mantle/system/copy.go index cb87d48d..48006f45 100644 --- a/mantle/system/copy.go +++ b/mantle/system/copy.go @@ -18,7 +18,6 @@ import ( "fmt" "io" "os" - "path/filepath" ) // CopyRegularFile copies a file in place, updates are not atomic. If @@ -55,12 +54,3 @@ func CopyRegularFile(src, dest string) (err error) { _, err = io.Copy(destFile, srcFile) return err } - -// InstallRegularFile copies a file, creating any parent directories. 
-func InstallRegularFile(src, dest string) error { - destDir := filepath.Dir(dest) - if err := os.MkdirAll(destDir, 0755); err != nil { - return err - } - return CopyRegularFile(src, dest) -} diff --git a/mantle/system/copy_test.go b/mantle/system/copy_test.go index 6f380ea4..12cc3a72 100644 --- a/mantle/system/copy_test.go +++ b/mantle/system/copy_test.go @@ -30,6 +30,9 @@ func checkFile(t *testing.T, path string, data []byte, mode os.FileMode) { defer file.Close() info, err := file.Stat() + if err != nil { + t.Fatal(err) + } if info.Mode() != mode { t.Fatalf("Unexpected mode: %s != %s %s", info.Mode(), mode, path) } @@ -45,11 +48,7 @@ func checkFile(t *testing.T, path string, data []byte, mode os.FileMode) { func TestCopyRegularFile(t *testing.T) { data := []byte("test") - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) + tmp := t.TempDir() src := filepath.Join(tmp, "src") if err := ioutil.WriteFile(src, data, 0600); err != nil { @@ -72,24 +71,3 @@ func TestCopyRegularFile(t *testing.T) { } checkFile(t, copy2, data, 0640) } - -func TestInstallRegularFile(t *testing.T) { - data := []byte("test") - tmp, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - src := filepath.Join(tmp, "src") - if err := ioutil.WriteFile(src, data, 0600); err != nil { - t.Fatal(err) - } - checkFile(t, src, data, 0600) - - copy1 := filepath.Join(tmp, "subdir", "copy1") - if err := InstallRegularFile(src, copy1); err != nil { - t.Fatal(err) - } - checkFile(t, copy1, data, 0600) -} diff --git a/mantle/system/exec/exec.go b/mantle/system/exec/exec.go index fb3369a6..d7ebc043 100644 --- a/mantle/system/exec/exec.go +++ b/mantle/system/exec/exec.go @@ -47,6 +47,9 @@ type Cmd interface { // Simplified wrapper for Process.Pid Pid() int + + // Simplified wrapper to know if a process was signaled + Signaled() bool } // Basic Cmd implementation based on exec.Cmd @@ -93,6 +96,14 @@ func (cmd *ExecCmd) 
Kill() error { return err } +func (cmd *ExecCmd) Signaled() bool { + if cmd.ProcessState == nil { + return false + } + status := cmd.ProcessState.Sys().(syscall.WaitStatus) + return status.Signaled() +} + func (cmd *ExecCmd) Pid() int { return cmd.Process.Pid } diff --git a/mantle/system/hostname.go b/mantle/system/hostname.go deleted file mode 100644 index 2b181967..00000000 --- a/mantle/system/hostname.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "net" - "os" - "strings" -) - -// FullHostname is a best effort attempt to resolve the canonical FQDN of -// the host. On failure it will fall back to a reasonable looking default -// such as 'localhost.' or 'hostname.invalid.' -func FullHostname() string { - hostname, err := os.Hostname() - if err != nil || hostname == "localhost" || hostname == "(none)" { - return "localhost." - } - fullname, err := net.LookupCNAME(hostname) - if err != nil { - fullname = hostname - if !strings.Contains(fullname, ".") { - fullname += ".invalid." - } - } - return fullname -} diff --git a/mantle/system/mount_linux.go b/mantle/system/mount_linux.go deleted file mode 100644 index 42ccfeb7..00000000 --- a/mantle/system/mount_linux.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2015 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "fmt" - "strings" - "syscall" -) - -const ( - // MS_PROPAGATION flags are special operations and cannot be combined - // with each other or any flags other than MS_REC. - MS_PROPAGATION = syscall.MS_SHARED | syscall.MS_SLAVE | syscall.MS_UNBINDABLE | syscall.MS_PRIVATE - // MS_OPERATION flags can be mapped to high level operation names. - MS_OPERATION = MS_PROPAGATION | syscall.MS_BIND | syscall.MS_MOVE | syscall.MS_REC -) - -// map mount flags to higher level "operation" names -var mountOps = map[uintptr]string{ - syscall.MS_BIND: "bind", - syscall.MS_BIND | syscall.MS_REC: "rbind", - syscall.MS_MOVE: "move", - syscall.MS_SILENT: "silent", - syscall.MS_UNBINDABLE: "unbindable", - syscall.MS_UNBINDABLE | syscall.MS_REC: "runbindable", - syscall.MS_PRIVATE: "private", - syscall.MS_PRIVATE | syscall.MS_REC: "rprivate", - syscall.MS_SLAVE: "slave", - syscall.MS_SLAVE | syscall.MS_REC: "rslave", - syscall.MS_SHARED: "shared", - syscall.MS_SHARED | syscall.MS_REC: "rshared", -} - -// map mount flag strings to the numeric value. 
-// names match mount(8) except where otherwise noted -var mountFlags = map[string]uintptr{ - "ro": syscall.MS_RDONLY, - "nosuid": syscall.MS_NOSUID, - "nodev": syscall.MS_NODEV, - "noexec": syscall.MS_NOEXEC, - "sync": syscall.MS_SYNCHRONOUS, - "remount": syscall.MS_REMOUNT, - "mand": syscall.MS_MANDLOCK, - "dirsync": syscall.MS_DIRSYNC, - "noatime": syscall.MS_NOATIME, - "nodiratime": syscall.MS_NODIRATIME, - "bind": syscall.MS_BIND, - "rbind": syscall.MS_BIND | syscall.MS_REC, - "x-move": syscall.MS_MOVE, // --move - "silent": syscall.MS_SILENT, - "unbindable": syscall.MS_UNBINDABLE, - "runbindable": syscall.MS_UNBINDABLE | syscall.MS_REC, - "private": syscall.MS_PRIVATE, - "rprivate": syscall.MS_PRIVATE | syscall.MS_REC, - "slave": syscall.MS_SLAVE, - "rslave": syscall.MS_SLAVE | syscall.MS_REC, - "shared": syscall.MS_SHARED, - "rshared": syscall.MS_SHARED | syscall.MS_REC, - "relatime": syscall.MS_RELATIME, - "iversion": syscall.MS_I_VERSION, - "strictatime": syscall.MS_STRICTATIME, -} - -// MountError records a mount operation failure, similar to os.PathError -type MountError struct { - Source string - Target string - FsType string - Flags uintptr - Extra string - Err error -} - -func (e *MountError) Error() string { - op, ok := mountOps[e.Flags&MS_OPERATION] - if !ok { - op = "mount" - } - if e.Flags&MS_PROPAGATION != 0 { - // Source is unused for these operations. 
- return fmt.Sprintf("%s on %s failed: %v", op, e.Target, e.Err) - } - return fmt.Sprintf("%s %s to %s failed: %v", op, e.Source, e.Target, e.Err) -} - -func splitFlags(options string) (uintptr, string) { - var flags uintptr - var extra []string - for _, opt := range strings.Split(options, ",") { - if flag, ok := mountFlags[opt]; ok { - flags |= flag - } else { - extra = append(extra, opt) - } - } - return flags, strings.Join(extra, ",") -} - -func doMount(source, target, fstype string, flags uintptr, extra string) error { - if err := syscall.Mount(source, target, fstype, flags, extra); err != nil { - return &MountError{ - Source: source, - Target: target, - FsType: fstype, - Flags: flags, - Extra: extra, - Err: err, - } - } - return nil -} - -// Mount wraps mount(2) in a similar way to mount(8), accepting both flags -// and filesystem options as a string. Any option not recognized as a flag -// will be passed as a filesystem option. Note that option parsing here is -// simpler than mount(8) and quotes are not considered. -func Mount(source, target, fstype, options string) error { - // A simple default for virtual filesystems - if source == "" { - source = fstype - } - flags, extra := splitFlags(options) - return doMount(source, target, fstype, flags, extra) -} - -// Bind creates a bind mount from source to target. -func Bind(source, target string) error { - return doMount(source, target, "none", syscall.MS_BIND, "") -} - -// ReadOnlyBind creates a read-only bind mount. Note that this must be -// performed in two operations so it is possible for a read-write bind -// to be left behind if the second operation fails. -func ReadOnlyBind(source, target string) error { - var flags uintptr = syscall.MS_BIND - if err := doMount(source, target, "none", flags, ""); err != nil { - return err - } - flags |= syscall.MS_REMOUNT | syscall.MS_RDONLY - return doMount(source, target, "none", flags, "") -} - -// RecursiveBind bind mounts an entire tree under source to target. 
-func RecursiveBind(source, target string) error { - return doMount(source, target, "none", syscall.MS_BIND|syscall.MS_REC, "") -} - -// Move moves an entire tree under the source mountpoint to target. -func Move(source, target string) error { - return doMount(source, target, "none", syscall.MS_MOVE, "") -} - -// MountPrivate changes a mount point's propagation type to "private" -func MountPrivate(target string) error { - return doMount("none", target, "none", syscall.MS_PRIVATE, "") -} - -// RecursivePrivate changes an entire tree's propagation type to "private" -func RecursivePrivate(target string) error { - return doMount("none", target, "none", syscall.MS_PRIVATE|syscall.MS_REC, "") -} - -// MountShared changes a mount point's propagation type to "shared" -func MountShared(target string) error { - return doMount("none", target, "none", syscall.MS_SHARED, "") -} - -// RecursiveShared changes an entire tree's propagation type to "shared" -func RecursiveShared(target string) error { - return doMount("none", target, "none", syscall.MS_SHARED|syscall.MS_REC, "") -} - -// MountSlave changes a mount point's propagation type to "slave" -func MountSlave(target string) error { - return doMount("none", target, "none", syscall.MS_SLAVE, "") -} - -// RecursiveSlave changes an entire tree's propagation type to "slave" -func RecursiveSlave(target string) error { - return doMount("none", target, "none", syscall.MS_SLAVE|syscall.MS_REC, "") -} diff --git a/mantle/system/nproc.go b/mantle/system/nproc.go index 8ea974e2..b4606387 100644 --- a/mantle/system/nproc.go +++ b/mantle/system/nproc.go @@ -15,13 +15,12 @@ package system import ( - "io/ioutil" + "fmt" + "math" "os" "strconv" "strings" - "github.com/pkg/errors" - "github.com/coreos/mantle/system/exec" ) @@ -29,29 +28,78 @@ import ( // this value is appropriate to pass to e.g. make -J as well as // qemu -smp for example. 
func GetProcessors() (uint, error) { - // Note this code originated in cmdlib.sh; the git history there will - // have a bit more info. - proc1cgroup, err := ioutil.ReadFile("/proc/1/cgroup") + // Get available CPU count, including sched_getaffinity() + nprocBuf, err := exec.Command("nproc").CombinedOutput() + if err != nil { + return 0, fmt.Errorf("executing nproc: %w", err) + } + nproc, err := strconv.ParseUint(strings.TrimSpace(string(nprocBuf)), 10, 32) + if err != nil { + return 0, fmt.Errorf("parsing nproc output: %w", err) + } + + // Compute the available CPU quota + quota, err := getCpuQuota() if err != nil { - if !os.IsNotExist(err) { - return 0, err + return 0, err + } + + if quota < uint(nproc) { + return quota, nil + } + return uint(nproc), nil +} + +func getCpuQuota() (uint, error) { + // cgroups v2 + buf, err := os.ReadFile("/sys/fs/cgroup/cpu.max") + if err == nil { + vals := strings.SplitN(strings.TrimSpace(string(buf)), " ", 2) + if len(vals) != 2 { + return 0, fmt.Errorf("invalid cpu.max value") } - } else { - // only use 1 core on kubernetes since we can't determine how much we can actually use - if strings.Contains(string(proc1cgroup), "kubepods") { - return 1, nil + if vals[0] != "max" { + quota, err := strconv.ParseUint(vals[0], 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid CPU quota: %w", err) + } + period, err := strconv.ParseUint(vals[1], 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid CPU period: %w", err) + } + if quota > 0 && period > 0 { + return uint((quota + period - 1) / period), nil + } } + } else if !os.IsNotExist(err) { + return 0, fmt.Errorf("reading cpu.max: %w", err) } - nprocBuf, err := exec.Command("nproc").CombinedOutput() + // cgroups v1 + buf, err = os.ReadFile("/sys/fs/cgroup/cpu/cpu.cfs_quota_us") + if os.IsNotExist(err) { + return math.MaxUint, nil + } else if err != nil { + return 0, fmt.Errorf("reading cpu.cfs_quota_us: %w", err) + } + // can be -1 + quota, err := 
strconv.ParseInt(strings.TrimSpace(string(buf)), 10, 32) if err != nil { - return 0, errors.Wrapf(err, "executing nproc") + return 0, fmt.Errorf("invalid CPU quota: %w", err) } - - nproc, err := strconv.ParseInt(strings.TrimSpace(string(nprocBuf)), 10, 32) + buf, err = os.ReadFile("/sys/fs/cgroup/cpu/cpu.cfs_period_us") + if os.IsNotExist(err) { + return math.MaxUint, nil + } else if err != nil { + return 0, fmt.Errorf("reading cpu.cfs_period_us: %w", err) + } + period, err := strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 32) if err != nil { - return 0, errors.Wrapf(err, "parsing nproc output") + return 0, fmt.Errorf("invalid CPU period: %w", err) + } + if quota > 0 && period > 0 { + return uint((uint64(quota) + period - 1) / period), nil } - return uint(nproc), nil + return math.MaxUint, nil } diff --git a/mantle/system/ns/enter.go b/mantle/system/ns/enter.go index b65c1186..996fd484 100644 --- a/mantle/system/ns/enter.go +++ b/mantle/system/ns/enter.go @@ -60,7 +60,16 @@ func Create() (netns.NsHandle, error) { return netns.None(), err } defer origns.Close() - defer netns.Set(origns) - return netns.New() + newns, err := netns.New() + if err != nil { + return netns.None(), err + } + + if err := netns.Set(origns); err != nil { + newns.Close() + return netns.None(), err + } + + return newns, nil } diff --git a/mantle/system/ns/exec.go b/mantle/system/ns/exec.go index 1a9ab3ec..12aec308 100644 --- a/mantle/system/ns/exec.go +++ b/mantle/system/ns/exec.go @@ -37,9 +37,14 @@ func (cmd *Cmd) CombinedOutput() ([]byte, error) { if err != nil { return nil, err } - defer nsExit() - return cmd.ExecCmd.CombinedOutput() + r, rerr := cmd.ExecCmd.CombinedOutput() + + if err := nsExit(); err != nil { + return nil, err + } + + return r, rerr } func (cmd *Cmd) Output() ([]byte, error) { @@ -47,9 +52,14 @@ func (cmd *Cmd) Output() ([]byte, error) { if err != nil { return nil, err } - defer nsExit() - return cmd.ExecCmd.Output() + r, rerr := cmd.ExecCmd.Output() + + if err := 
nsExit(); err != nil { + return nil, err + } + + return r, rerr } func (cmd *Cmd) Run() error { @@ -57,9 +67,14 @@ func (cmd *Cmd) Run() error { if err != nil { return err } - defer nsExit() - return cmd.ExecCmd.Run() + rerr := cmd.ExecCmd.Run() + + if err := nsExit(); err != nil { + return err + } + + return rerr } func (cmd *Cmd) Start() error { @@ -67,7 +82,12 @@ func (cmd *Cmd) Start() error { if err != nil { return err } - defer nsExit() - return cmd.ExecCmd.Start() + rerr := cmd.ExecCmd.Start() + + if err := nsExit(); err != nil { + return err + } + + return rerr } diff --git a/mantle/system/symlink.go b/mantle/system/symlink.go deleted file mode 100644 index c8677454..00000000 --- a/mantle/system/symlink.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package system - -import ( - "os" -) - -// IsSymlink checks if a path is a symbolic link. -func IsSymlink(path string) bool { - st, err := os.Lstat(path) - return err != nil && st.Mode()&os.ModeSymlink == os.ModeSymlink -} diff --git a/mantle/system/user/user.go b/mantle/system/user/user.go deleted file mode 100644 index 9bcd663b..00000000 --- a/mantle/system/user/user.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// user provides an extended User struct to simplify usage -package user - -import ( - "os/user" - "strconv" -) - -// User represents a user account. -type User struct { - *user.User - Groupname string - // For convenience so users don't need to strconv themselves. - UidNo int - GidNo int -} - -// Current returns the current user. -func Current() (*User, error) { - return newUser(user.Current()) -} - -// Lookup looks up a user by username. -func Lookup(username string) (*User, error) { - return newUser(user.Lookup(username)) -} - -// LookupId looks up a user by userid. -func LookupId(uid string) (*User, error) { - return newUser(user.LookupId(uid)) -} - -// Convert the stock user.User to our own User strict with group info. -func newUser(u *user.User, err error) (*User, error) { - if err != nil { - return nil, err - } - - g, err := user.LookupGroupId(u.Gid) - if err != nil { - return nil, err - } - - uid, err := strconv.Atoi(u.Uid) - if err != nil { - return nil, err - } - - gid, err := strconv.Atoi(u.Gid) - if err != nil { - return nil, err - } - - return &User{ - User: u, - Groupname: g.Name, - UidNo: uid, - GidNo: gid, - }, nil -} diff --git a/mantle/system/user/user_test.go b/mantle/system/user/user_test.go deleted file mode 100644 index f2de9df6..00000000 --- a/mantle/system/user/user_test.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// Copyright 2011 The Go Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package user - -import ( - "fmt" - "testing" -) - -func TestCurrent(t *testing.T) { - u, err := Current() - if err != nil { - t.Fatalf("Current: %v", err) - } - if u.HomeDir == "" { - t.Errorf("didn't get a HomeDir") - } - if u.Username == "" { - t.Errorf("didn't get a username") - } - if u.Groupname == "" { - t.Errorf("didn't get a groupname") - } - if u.Uid != fmt.Sprintf("%d", u.UidNo) { - t.Errorf("Uid %q and %d do not match", u.Uid, u.UidNo) - } - if u.Gid != fmt.Sprintf("%d", u.GidNo) { - t.Errorf("Gid %q and %d do not match", u.Gid, u.GidNo) - } -} - -func compare(t *testing.T, want, got *User) { - if want.Uid != got.Uid { - t.Errorf("got Uid=%q; want %q", got.Uid, want.Uid) - } - if want.Username != got.Username { - t.Errorf("got Username=%q; want %q", got.Username, want.Username) - } - if want.Name != got.Name { - t.Errorf("got Name=%q; want %q", got.Name, want.Name) - } - if want.Gid != got.Gid { - t.Errorf("got Gid=%q; want %q", got.Gid, want.Gid) - } - if want.Groupname != got.Groupname { - t.Errorf("got Groupname=%q; want %q", got.Gid, want.Gid) - } - if want.HomeDir != got.HomeDir { - t.Errorf("got HomeDir=%q; want %q", got.HomeDir, want.HomeDir) - } - if want.UidNo != got.UidNo { - t.Errorf("got UidNo=%d; want %d", got.UidNo, want.UidNo) - } - if want.GidNo != got.GidNo { - t.Errorf("got GidNo=%d; want %d", got.GidNo, want.GidNo) - } -} - -func TestLookup(t *testing.T) { - want, err := Current() - if err != nil { - t.Fatalf("Current: %v", err) - } - got, err := Lookup(want.Username) - if err != nil { - t.Fatalf("Lookup: %v", err) 
- } - compare(t, want, got) -} - -func TestLookupId(t *testing.T) { - want, err := Current() - if err != nil { - t.Fatalf("Current: %v", err) - } - got, err := LookupId(want.Uid) - if err != nil { - t.Fatalf("LookupId: %v", err) - } - compare(t, want, got) -} diff --git a/mantle/util/common.go b/mantle/util/common.go index 21f1c3b8..f597d1df 100644 --- a/mantle/util/common.go +++ b/mantle/util/common.go @@ -80,7 +80,9 @@ func CreateSSHAuthorizedKey(tmpd string) ([]byte, string, error) { var err error sshKeyPath := filepath.Join(tmpd, "ssh.key") sshPubKeyPath := sshKeyPath + ".pub" - err = exec.Command("ssh-keygen", "-N", "", "-t", "ed25519", "-f", sshKeyPath).Run() + c := exec.Command("ssh-keygen", "-N", "", "-t", "ed25519", "-f", sshKeyPath) + c.Stderr = os.Stderr + err = c.Run() if err != nil { return nil, "", errors.Wrapf(err, "running ssh-keygen") } @@ -114,7 +116,7 @@ func RunCmdTimeout(timeout time.Duration, cmd string, args ...string) error { case <-time.After(timeout): // this uses the waitid(WNOWAIT) trick to avoid racing: // https://github.com/golang/go/commit/cea29c4a358004d84d8711a07628c2f856b381e8 - c.Process.Kill() + _ = c.Process.Kill() <-errc return fmt.Errorf("%s timed out after %s", cmd, timeout) } diff --git a/mantle/sdk/distros.go b/mantle/util/distros.go similarity index 88% rename from mantle/sdk/distros.go rename to mantle/util/distros.go index 634fd0a0..85eda440 100644 --- a/mantle/sdk/distros.go +++ b/mantle/util/distros.go @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package sdk +package util import ( "fmt" "path/filepath" "strings" - "github.com/coreos/coreos-assembler-schema/cosa" + "github.com/coreos/coreos-assembler/pkg/builds" ) // TargetDistroFromName returns the distribution given @@ -34,12 +34,12 @@ func TargetDistroFromName(artifact string) string { } // TargetDistro returns the distribution of a cosa build -func TargetDistro(build *cosa.Build) (string, error) { +func TargetDistro(build *builds.Build) (string, error) { switch build.Name { - case "nestos": - return "nestos", nil case "rhcos": return "rhcos", nil + case "scos": + return "rhcos", nil case "fedora-coreos": return "fcos", nil default: diff --git a/mantle/util/logio.go b/mantle/util/logio.go index debe304f..4aa69a28 100644 --- a/mantle/util/logio.go +++ b/mantle/util/logio.go @@ -16,11 +16,8 @@ package util import ( "bufio" - "fmt" "io" - "os" - "github.com/coreos/ioprogress" "github.com/coreos/pkg/capnslog" ) @@ -36,38 +33,3 @@ func LogFrom(l capnslog.LogLevel, r io.Reader) { plog.Errorf("Reading %s failed: %v", r, err) } } - -// CopyProgress copies data from reader into writter, logging progress through level. -func CopyProgress(level capnslog.LogLevel, prefix string, writer io.Writer, reader io.Reader, total int64) (int64, error) { - // TODO(marineam): would be nice to support this natively in - // capnslog so the right output stream and formatter are used. 
- if plog.LevelAt(level) { - // ripped off from rkt, so another reason to add to capnslog - fmtBytesSize := 18 - barSize := int64(80 - len(prefix) - fmtBytesSize) - bar := ioprogress.DrawTextFormatBarForW(barSize, os.Stderr) - fmtfunc := func(progress, total int64) string { - if total < 0 { - return fmt.Sprintf( - "%s: %v of an unknown total size", - prefix, - ioprogress.ByteUnitStr(progress), - ) - } - return fmt.Sprintf( - "%s: %s %s", - prefix, - bar(progress, total), - ioprogress.DrawTextFormatBytes(progress, total), - ) - } - - reader = &ioprogress.Reader{ - Reader: reader, - Size: total, - DrawFunc: ioprogress.DrawTerminalf(os.Stderr, fmtfunc), - } - } - - return io.Copy(writer, reader) -} diff --git a/mantle/sdk/repo.go b/mantle/util/repo.go similarity index 90% rename from mantle/sdk/repo.go rename to mantle/util/repo.go index 58c7daaf..d957a0ba 100644 --- a/mantle/sdk/repo.go +++ b/mantle/util/repo.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package sdk +package util import ( "fmt" @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" - "github.com/coreos/coreos-assembler-schema/cosa" + cosa "github.com/coreos/coreos-assembler/pkg/builds" ) const ( @@ -101,13 +101,3 @@ func GetLocalBuild(root, buildid, arch string) (*LocalBuild, error) { Meta: cosameta, }, nil } - -// TODO replace with coreos-assembler concepts -func BoardRoot(board string) string { - return "" -} - -// TODO replace with coreos-assembler concepts -func BuildRoot() string { - return "" -} diff --git a/mantle/util/xz.go b/mantle/util/xz.go deleted file mode 100644 index 475414cd..00000000 --- a/mantle/util/xz.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 Red Hat. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io" - "os" - "os/exec" - - "github.com/ulikunitz/xz" -) - -func XzDecompressStream(out io.Writer, in io.Reader) error { - // opportunistically use the `xz` CLI if available since it's way faster - xzPath, err := exec.LookPath("xz") - if err == nil { - cmd := exec.Command(xzPath, "--decompress", "--stdout") - cmd.Stdin = in - cmd.Stdout = out - return cmd.Run() - } - - reader, err := xz.NewReader(in) - if err != nil { - return err - } - - _, err = io.Copy(out, reader) - return err -} - -// XzDecompressFile does xz decompression from src file into dst file -func XzDecompressFile(dst, src string) error { - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - - out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - defer out.Close() - - if err = XzDecompressStream(out, in); err != nil { - os.Remove(dst) - } - return err -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/README.md b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/README.md deleted file mode 100644 index 0a3cf82e..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Azure Service Management packages for Go - -The `github.com/Azure/azure-sdk-for-go/management` packages are used to perform operations using the Azure Service Management (ASM), aka classic deployment model. Read more about [Azure Resource Manager vs. 
classic deployment](https://azure.microsoft.com/documentation/articles/resource-manager-deployment-model/). Packages for Azure Resource Manager are in the [arm](../arm) folder. - -## First a Sidenote: Authentication and the Azure Service Manager - -The client currently supports authentication to the Service Management -API with certificates or Azure `.publishSettings` file. You can -download the `.publishSettings` file for your subscriptions -[here](https://manage.windowsazure.com/publishsettings). - -### Example: Creating a Linux Virtual Machine - -Complete source code for this example can be found in [example.go](/management/examples/example.go).To try this example, [download your .publishSettings](https://manage.windowsazure.com/publishsettings) and add its path in the first line of the main function. Run the example with commands: - -``` -$ cd management/examples -$ go run example.go -``` \ No newline at end of file diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/client.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/client.go deleted file mode 100644 index 67da834f..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/client.go +++ /dev/null @@ -1,138 +0,0 @@ -// Package management provides the main API client to construct other clients -// and make requests to the Microsoft Azure Service Management REST API. -package management - -import ( - "errors" - "time" -) - -const ( - DefaultAzureManagementURL = "https://management.core.windows.net" - DefaultOperationPollInterval = time.Second * 30 - DefaultAPIVersion = "2014-10-01" - DefaultUserAgent = "azure-sdk-for-go" - - errPublishSettingsConfiguration = "PublishSettingsFilePath is set. Consequently ManagementCertificatePath and SubscriptionId must not be set." - errManagementCertificateConfiguration = "Both ManagementCertificatePath and SubscriptionId should be set, and PublishSettingsFilePath must not be set." 
- errParamNotSpecified = "Parameter %s is not specified." -) - -type client struct { - publishSettings publishSettings - config ClientConfig -} - -// Client is the base Azure Service Management API client instance that -// can be used to construct client instances for various services. -type Client interface { - // SendAzureGetRequest sends a request to the management API using the HTTP GET method - // and returns the response body or an error. - SendAzureGetRequest(url string) ([]byte, error) - - // SendAzurePostRequest sends a request to the management API using the HTTP POST method - // and returns the request ID or an error. - SendAzurePostRequest(url string, data []byte) (OperationID, error) - - // SendAzurePostRequestWithReturnedResponse sends a request to the management API using - // the HTTP POST method and returns the response body or an error. - SendAzurePostRequestWithReturnedResponse(url string, data []byte) ([]byte, error) - - // SendAzurePutRequest sends a request to the management API using the HTTP PUT method - // and returns the request ID or an error. The content type can be specified, however - // if an empty string is passed, the default of "application/xml" will be used. - SendAzurePutRequest(url, contentType string, data []byte) (OperationID, error) - - // SendAzureDeleteRequest sends a request to the management API using the HTTP DELETE method - // and returns the request ID or an error. - SendAzureDeleteRequest(url string) (OperationID, error) - - // GetOperationStatus gets the status of operation with given Operation ID. - // WaitForOperation utility method can be used for polling for operation status. - GetOperationStatus(operationID OperationID) (GetOperationStatusResponse, error) - - // WaitForOperation polls the Azure API for given operation ID indefinitely - // until the operation is completed with either success or failure. 
- // It is meant to be used for waiting for the result of the methods that - // return an OperationID value (meaning a long running operation has started). - // - // Cancellation of the polling loop (for instance, timing out) is done through - // cancel channel. If the user does not want to cancel, a nil chan can be provided. - // To cancel the method, it is recommended to close the channel provided to this - // method. - // - // If the operation was not successful or cancelling is signaled, an error - // is returned. - WaitForOperation(operationID OperationID, cancel chan struct{}) error -} - -// ClientConfig provides a configuration for use by a Client. -type ClientConfig struct { - ManagementURL string - OperationPollInterval time.Duration - UserAgent string - APIVersion string -} - -// NewAnonymousClient creates a new azure.Client with no credentials set. -func NewAnonymousClient() Client { - return client{} -} - -// DefaultConfig returns the default client configuration used to construct -// a client. This value can be used to make modifications on the default API -// configuration. -func DefaultConfig() ClientConfig { - return ClientConfig{ - ManagementURL: DefaultAzureManagementURL, - OperationPollInterval: DefaultOperationPollInterval, - APIVersion: DefaultAPIVersion, - UserAgent: DefaultUserAgent, - } -} - -// NewClient creates a new Client using the given subscription ID and -// management certificate. -func NewClient(subscriptionID string, managementCert []byte) (Client, error) { - return NewClientFromConfig(subscriptionID, managementCert, DefaultConfig()) -} - -// NewClientFromConfig creates a new Client using a given ClientConfig. 
-func NewClientFromConfig(subscriptionID string, managementCert []byte, config ClientConfig) (Client, error) { - return makeClient(subscriptionID, managementCert, config) -} - -func makeClient(subscriptionID string, managementCert []byte, config ClientConfig) (Client, error) { - var c client - - if subscriptionID == "" { - return c, errors.New("azure: subscription ID required") - } - - if len(managementCert) == 0 { - return c, errors.New("azure: management certificate required") - } - - publishSettings := publishSettings{ - SubscriptionID: subscriptionID, - SubscriptionCert: managementCert, - SubscriptionKey: managementCert, - } - - // Validate client configuration - switch { - case config.ManagementURL == "": - return c, errors.New("azure: base URL required") - case config.OperationPollInterval <= 0: - return c, errors.New("azure: operation polling interval must be a positive duration") - case config.APIVersion == "": - return c, errors.New("azure: client configuration must specify an API version") - case config.UserAgent == "": - config.UserAgent = DefaultUserAgent - } - - return client{ - publishSettings: publishSettings, - config: config, - }, nil -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/errors.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/errors.go deleted file mode 100644 index 79859454..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/errors.go +++ /dev/null @@ -1,36 +0,0 @@ -package management - -import ( - "encoding/xml" - "fmt" -) - -// AzureError represents an error returned by the management API. It has an error -// code (for example, ResourceNotFound) and a descriptive message. -type AzureError struct { - Code string - Message string -} - -//Error implements the error interface for the AzureError type. -func (e AzureError) Error() string { - return fmt.Sprintf("Error response from Azure. 
Code: %s, Message: %s", e.Code, e.Message) -} - -// IsResourceNotFoundError returns true if the provided error is an AzureError -// reporting that a given resource has not been found. -func IsResourceNotFoundError(err error) bool { - azureErr, ok := err.(AzureError) - return ok && azureErr.Code == "ResourceNotFound" -} - -// getAzureError converts an error response body into an AzureError instance. -func getAzureError(responseBody []byte) error { - var azErr AzureError - err := xml.Unmarshal(responseBody, &azErr) - if err != nil { - return fmt.Errorf("Failed parsing contents to AzureError format: %v", err) - } - return azErr - -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/http.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/http.go deleted file mode 100644 index 5760f51e..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/http.go +++ /dev/null @@ -1,190 +0,0 @@ -package management - -import ( - "bytes" - "crypto/tls" - "fmt" - "net/http" -) - -const ( - msVersionHeader = "x-ms-version" - requestIDHeader = "x-ms-request-id" - uaHeader = "User-Agent" - contentHeader = "Content-Type" - defaultContentHeaderValue = "application/xml" -) - -func (client client) SendAzureGetRequest(url string) ([]byte, error) { - resp, err := client.sendAzureRequest("GET", url, "", nil) - if err != nil { - return nil, err - } - return getResponseBody(resp) -} - -func (client client) SendAzurePostRequest(url string, data []byte) (OperationID, error) { - return client.doAzureOperation("POST", url, "", data) -} - -func (client client) SendAzurePostRequestWithReturnedResponse(url string, data []byte) ([]byte, error) { - resp, err := client.sendAzureRequest("POST", url, "", data) - if err != nil { - return nil, err - } - - return getResponseBody(resp) -} - -func (client client) SendAzurePutRequest(url, contentType string, data []byte) (OperationID, error) { - return client.doAzureOperation("PUT", url, contentType, data) -} - 
-func (client client) SendAzureDeleteRequest(url string) (OperationID, error) { - return client.doAzureOperation("DELETE", url, "", nil) -} - -func (client client) doAzureOperation(method, url, contentType string, data []byte) (OperationID, error) { - response, err := client.sendAzureRequest(method, url, contentType, data) - if err != nil { - return "", err - } - return getOperationID(response) -} - -func getOperationID(response *http.Response) (OperationID, error) { - requestID := response.Header.Get(requestIDHeader) - if requestID == "" { - return "", fmt.Errorf("Could not retrieve operation id from %q header", requestIDHeader) - } - return OperationID(requestID), nil -} - -// sendAzureRequest constructs an HTTP client for the request, sends it to the -// management API and returns the response or an error. -func (client client) sendAzureRequest(method, url, contentType string, data []byte) (*http.Response, error) { - if method == "" { - return nil, fmt.Errorf(errParamNotSpecified, "method") - } - if url == "" { - return nil, fmt.Errorf(errParamNotSpecified, "url") - } - - httpClient, err := client.createHTTPClient() - if err != nil { - return nil, err - } - - response, err := client.sendRequest(httpClient, url, method, contentType, data, 5) - if err != nil { - return nil, err - } - - return response, nil -} - -// createHTTPClient creates an HTTP Client configured with the key pair for -// the subscription for this client. 
-func (client client) createHTTPClient() (*http.Client, error) { - cert, err := tls.X509KeyPair(client.publishSettings.SubscriptionCert, client.publishSettings.SubscriptionKey) - if err != nil { - return nil, err - } - - return &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSClientConfig: &tls.Config{ - Renegotiation: tls.RenegotiateOnceAsClient, - Certificates: []tls.Certificate{cert}, - }, - }, - }, nil -} - -// sendRequest sends a request to the Azure management API using the given -// HTTP client and parameters. It returns the response from the call or an -// error. -func (client client) sendRequest(httpClient *http.Client, url, requestType, contentType string, data []byte, numberOfRetries int) (*http.Response, error) { - - absURI := client.createAzureRequestURI(url) - - for { - request, reqErr := client.createAzureRequest(absURI, requestType, contentType, data) - if reqErr != nil { - return nil, reqErr - } - - response, err := httpClient.Do(request) - if err != nil { - if numberOfRetries == 0 { - return nil, err - } - - return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) - } - if response.StatusCode == http.StatusTemporaryRedirect { - // ASM's way of moving traffic around, see https://msdn.microsoft.com/en-us/library/azure/ee460801.aspx - // Only handled automatically for GET/HEAD requests. This is for the rest of the http verbs. 
- u, err := response.Location() - if err != nil { - return response, fmt.Errorf("Redirect requested but location header could not be retrieved: %v", err) - } - absURI = u.String() - continue // re-issue request - } - - if response.StatusCode >= http.StatusBadRequest { - body, err := getResponseBody(response) - if err != nil { - // Failed to read the response body - return nil, err - } - azureErr := getAzureError(body) - if azureErr != nil { - if numberOfRetries == 0 { - return nil, azureErr - } - - return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) - } - } - - return response, nil - } -} - -// createAzureRequestURI constructs the request uri using the management API endpoint and -// subscription ID associated with the client. -func (client client) createAzureRequestURI(url string) string { - return fmt.Sprintf("%s/%s/%s", client.config.ManagementURL, client.publishSettings.SubscriptionID, url) -} - -// createAzureRequest packages up the request with the correct set of headers and returns -// the request object or an error. 
-func (client client) createAzureRequest(url string, requestType string, contentType string, data []byte) (*http.Request, error) { - var request *http.Request - var err error - - if data != nil { - body := bytes.NewBuffer(data) - request, err = http.NewRequest(requestType, url, body) - } else { - request, err = http.NewRequest(requestType, url, nil) - } - - if err != nil { - return nil, err - } - - request.Header.Set(msVersionHeader, client.config.APIVersion) - request.Header.Set(uaHeader, client.config.UserAgent) - - if contentType != "" { - request.Header.Set(contentHeader, contentType) - } else { - request.Header.Set(contentHeader, defaultContentHeaderValue) - } - - return request, nil -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go deleted file mode 100644 index 721f3f60..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/client.go +++ /dev/null @@ -1,30 +0,0 @@ -// Package location provides a client for Locations. -package location - -import ( - "encoding/xml" - - "github.com/Azure/azure-sdk-for-go/management" -) - -const ( - azureLocationListURL = "locations" - errParamNotSpecified = "Parameter %s is not specified." 
-) - -//NewClient is used to instantiate a new LocationClient from an Azure client -func NewClient(client management.Client) LocationClient { - return LocationClient{client: client} -} - -func (c LocationClient) ListLocations() (ListLocationsResponse, error) { - var l ListLocationsResponse - - response, err := c.client.SendAzureGetRequest(azureLocationListURL) - if err != nil { - return l, err - } - - err = xml.Unmarshal(response, &l) - return l, err -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go deleted file mode 100644 index 8a250158..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/location/entities.go +++ /dev/null @@ -1,37 +0,0 @@ -package location - -import ( - "bytes" - "encoding/xml" - "fmt" - "strings" - - "github.com/Azure/azure-sdk-for-go/management" -) - -//LocationClient is used to perform operations on Azure Locations -type LocationClient struct { - client management.Client -} - -type ListLocationsResponse struct { - XMLName xml.Name `xml:"Locations"` - Locations []Location `xml:"Location"` -} - -type Location struct { - Name string - DisplayName string - AvailableServices []string `xml:"AvailableServices>AvailableService"` - WebWorkerRoleSizes []string `xml:"ComputeCapabilities>WebWorkerRoleSizes>RoleSize"` - VirtualMachineRoleSizes []string `xml:"ComputeCapabilities>VirtualMachinesRoleSizes>RoleSize"` -} - -func (ll ListLocationsResponse) String() string { - var buf bytes.Buffer - for _, l := range ll.Locations { - fmt.Fprintf(&buf, "%s, ", l.Name) - } - - return strings.Trim(buf.String(), ", ") -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/operations.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/operations.go deleted file mode 100644 index 4f6acb21..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/operations.go +++ /dev/null 
@@ -1,92 +0,0 @@ -package management - -import ( - "encoding/xml" - "errors" - "fmt" - "time" -) - -var ( - // ErrOperationCancelled from WaitForOperation when the polling loop is - // cancelled through signaling the channel. - ErrOperationCancelled = errors.New("Polling for operation status cancelled") -) - -// GetOperationStatusResponse represents an in-flight operation. Use -// client.GetOperationStatus() to get the operation given the operation ID, or -// use WaitForOperation() to poll and wait until the operation has completed. -// See https://msdn.microsoft.com/en-us/library/azure/ee460783.aspx -type GetOperationStatusResponse struct { - XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Operation"` - ID string - Status OperationStatus - HTTPStatusCode string - Error *AzureError -} - -// OperationStatus describes the states an Microsoft Azure Service Management -// operation an be in. -type OperationStatus string - -// List of states an operation can be reported as -const ( - OperationStatusInProgress OperationStatus = "InProgress" - OperationStatusSucceeded OperationStatus = "Succeeded" - OperationStatusFailed OperationStatus = "Failed" -) - -// OperationID is assigned by Azure API and can be used to look up the status of -// an operation -type OperationID string - -func (c client) GetOperationStatus(operationID OperationID) (GetOperationStatusResponse, error) { - operation := GetOperationStatusResponse{} - if operationID == "" { - return operation, fmt.Errorf(errParamNotSpecified, "operationID") - } - - url := fmt.Sprintf("operations/%s", operationID) - response, azureErr := c.SendAzureGetRequest(url) - if azureErr != nil { - return operation, azureErr - } - - err := xml.Unmarshal(response, &operation) - return operation, err -} - -func (c client) WaitForOperation(operationID OperationID, cancel chan struct{}) error { - for { - done, err := c.checkOperationStatus(operationID) - if err != nil || done { - return err - } - select { - case 
<-time.After(c.config.OperationPollInterval): - case <-cancel: - return ErrOperationCancelled - } - } -} - -func (c client) checkOperationStatus(id OperationID) (done bool, err error) { - op, err := c.GetOperationStatus(id) - if err != nil { - return false, fmt.Errorf("Failed to get operation status '%s': %v", id, err) - } - - switch op.Status { - case OperationStatusSucceeded: - return true, nil - case OperationStatusFailed: - if op.Error != nil { - return true, op.Error - } - return true, fmt.Errorf("Azure Operation (x-ms-request-id=%s) has failed", id) - case OperationStatusInProgress: - return false, nil - default: - return false, fmt.Errorf("Unknown operation status returned from API: %s (x-ms-request-id=%s)", op.Status, id) - } -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go deleted file mode 100644 index 17505536..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/publishSettings.go +++ /dev/null @@ -1,108 +0,0 @@ -package management - -import ( - "encoding/base64" - "encoding/pem" - "encoding/xml" - "fmt" - "io/ioutil" - - "golang.org/x/crypto/pkcs12" -) - -// ClientFromPublishSettingsData unmarshalls the contents of a publish settings file -// from https://manage.windowsazure.com/publishsettings. -// If subscriptionID is left empty, the first subscription in the file is used. -func ClientFromPublishSettingsData(settingsData []byte, subscriptionID string) (client Client, err error) { - return ClientFromPublishSettingsDataWithConfig(settingsData, subscriptionID, DefaultConfig()) -} - -// ClientFromPublishSettingsFile reads a publish settings file downloaded from https://manage.windowsazure.com/publishsettings. -// If subscriptionID is left empty, the first subscription in the file is used. 
-func ClientFromPublishSettingsFile(filePath, subscriptionID string) (client Client, err error) { - return ClientFromPublishSettingsFileWithConfig(filePath, subscriptionID, DefaultConfig()) -} - -// ClientFromPublishSettingsFileWithConfig reads a publish settings file downloaded from https://manage.windowsazure.com/publishsettings. -// If subscriptionID is left empty, the first subscription in the file is used. -func ClientFromPublishSettingsFileWithConfig(filePath, subscriptionID string, config ClientConfig) (client Client, err error) { - if filePath == "" { - return client, fmt.Errorf(errParamNotSpecified, "filePath") - } - - publishSettingsContent, err := ioutil.ReadFile(filePath) - if err != nil { - return client, err - } - - return ClientFromPublishSettingsDataWithConfig(publishSettingsContent, subscriptionID, config) -} - -// ClientFromPublishSettingsDataWithConfig unmarshalls the contents of a publish settings file -// from https://manage.windowsazure.com/publishsettings. -// If subscriptionID is left empty, the first subscription in the string is used. -func ClientFromPublishSettingsDataWithConfig(data []byte, subscriptionID string, config ClientConfig) (client Client, err error) { - publishData := publishData{} - if err = xml.Unmarshal(data, &publishData); err != nil { - return client, err - } - - for _, profile := range publishData.PublishProfiles { - for _, sub := range profile.Subscriptions { - if sub.ID == subscriptionID || subscriptionID == "" { - base64Cert := sub.ManagementCertificate - if base64Cert == "" { - base64Cert = profile.ManagementCertificate - } - - pfxData, err := base64.StdEncoding.DecodeString(base64Cert) - if err != nil { - return client, err - } - - pems, err := pkcs12.ToPEM(pfxData, "") - if err != nil { - return client, err - } - - cert := []byte{} - for _, b := range pems { - cert = append(cert, pem.EncodeToMemory(b)...) 
- } - - config.ManagementURL = sub.ServiceManagementURL - return makeClient(sub.ID, cert, config) - } - } - } - - return client, fmt.Errorf("could not find subscription '%s' in settings provided", subscriptionID) -} - -type publishSettings struct { - SubscriptionID string - SubscriptionCert []byte - SubscriptionKey []byte -} - -type publishData struct { - XMLName xml.Name `xml:"PublishData"` - PublishProfiles []publishProfile `xml:"PublishProfile"` -} - -type publishProfile struct { - XMLName xml.Name `xml:"PublishProfile"` - SchemaVersion string `xml:",attr"` - PublishMethod string `xml:",attr"` - URL string `xml:"Url,attr"` - ManagementCertificate string `xml:",attr"` - Subscriptions []subscription `xml:"Subscription"` -} - -type subscription struct { - XMLName xml.Name `xml:"Subscription"` - ServiceManagementURL string `xml:"ServiceManagementUrl,attr"` - ID string `xml:"Id,attr"` - Name string `xml:",attr"` - ManagementCertificate string `xml:",attr"` -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go deleted file mode 100644 index dad87e6d..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package storageservice provides a client for Storage Services. -package storageservice - -import ( - "encoding/xml" - "fmt" - - "github.com/Azure/azure-sdk-for-go/management" -) - -const ( - azureStorageServiceListURL = "services/storageservices" - azureStorageServiceURL = "services/storageservices/%s" - azureStorageServiceKeysURL = "services/storageservices/%s/keys" - azureStorageAccountAvailabilityURL = "services/storageservices/operations/isavailable/%s" - - azureXmlns = "http://schemas.microsoft.com/windowsazure" - - errParamNotSpecified = "Parameter %s is not specified." 
-) - -// NewClient is used to instantiate a new StorageServiceClient from an Azure -// client. -func NewClient(s management.Client) StorageServiceClient { - return StorageServiceClient{client: s} -} - -func (s StorageServiceClient) ListStorageServices() (ListStorageServicesResponse, error) { - var l ListStorageServicesResponse - response, err := s.client.SendAzureGetRequest(azureStorageServiceListURL) - if err != nil { - return l, err - } - - err = xml.Unmarshal(response, &l) - return l, err -} - -func (s StorageServiceClient) GetStorageService(serviceName string) (StorageServiceResponse, error) { - var svc StorageServiceResponse - if serviceName == "" { - return svc, fmt.Errorf(errParamNotSpecified, "serviceName") - } - - requestURL := fmt.Sprintf(azureStorageServiceURL, serviceName) - response, err := s.client.SendAzureGetRequest(requestURL) - if err != nil { - return svc, err - } - - err = xml.Unmarshal(response, &svc) - return svc, err -} - -func (s StorageServiceClient) GetStorageServiceKeys(serviceName string) (GetStorageServiceKeysResponse, error) { - var r GetStorageServiceKeysResponse - if serviceName == "" { - return r, fmt.Errorf(errParamNotSpecified, "serviceName") - } - - requestURL := fmt.Sprintf(azureStorageServiceKeysURL, serviceName) - data, err := s.client.SendAzureGetRequest(requestURL) - if err != nil { - return r, err - } - - err = xml.Unmarshal(data, &r) - return r, err -} - -func (s StorageServiceClient) CreateStorageService(parameters StorageAccountCreateParameters) (management.OperationID, error) { - data, err := xml.Marshal(CreateStorageServiceInput{ - StorageAccountCreateParameters: parameters}) - if err != nil { - return "", err - } - - return s.client.SendAzurePostRequest(azureStorageServiceListURL, data) -} - -func (s StorageServiceClient) DeleteStorageService(serviceName string) (management.OperationID, error) { - if serviceName == "" { - return "", fmt.Errorf(errParamNotSpecified, "serviceName") - } - - requestURL := 
fmt.Sprintf(azureStorageServiceURL, serviceName) - return s.client.SendAzureDeleteRequest(requestURL) -} - -// CheckStorageAccountNameAvailability checks to if the specified storage account -// name is available. -// -// See https://msdn.microsoft.com/en-us/library/azure/jj154125.aspx -func (s StorageServiceClient) CheckStorageAccountNameAvailability(name string) (AvailabilityResponse, error) { - var r AvailabilityResponse - if name == "" { - return r, fmt.Errorf(errParamNotSpecified, "name") - } - - requestURL := fmt.Sprintf(azureStorageAccountAvailabilityURL, name) - response, err := s.client.SendAzureGetRequest(requestURL) - if err != nil { - return r, err - } - - err = xml.Unmarshal(response, &r) - return r, err -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go deleted file mode 100644 index 2401298a..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go +++ /dev/null @@ -1,79 +0,0 @@ -package storageservice - -import ( - "encoding/xml" - - "github.com/Azure/azure-sdk-for-go/management" -) - -// StorageServiceClient is used to perform operations on Azure Storage -type StorageServiceClient struct { - client management.Client -} - -type ListStorageServicesResponse struct { - StorageServices []StorageServiceResponse `xml:"StorageService"` -} - -type StorageServiceResponse struct { - URL string `xml:"Url"` - ServiceName string - StorageServiceProperties StorageServiceProperties -} - -type StorageServiceProperties struct { - Description string - Location string - Label string - Status string - Endpoints []string `xml:"Endpoints>Endpoint"` - GeoReplicationEnabled string - GeoPrimaryRegion string -} - -type GetStorageServiceKeysResponse struct { - URL string `xml:"Url"` - PrimaryKey string `xml:"StorageServiceKeys>Primary"` - SecondaryKey string `xml:"StorageServiceKeys>Secondary"` -} 
- -type CreateStorageServiceInput struct { - XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CreateStorageServiceInput"` - StorageAccountCreateParameters -} - -type StorageAccountCreateParameters struct { - ServiceName string - Description string `xml:",omitempty"` - Label string - AffinityGroup string `xml:",omitempty"` - Location string `xml:",omitempty"` - ExtendedProperties ExtendedPropertyList - AccountType AccountType -} - -type AccountType string - -const ( - AccountTypeStandardLRS AccountType = "Standard_LRS" - AccountTypeStandardZRS AccountType = "Standard_ZRS" - AccountTypeStandardGRS AccountType = "Standard_GRS" - AccountTypeStandardRAGRS AccountType = "Standard_RAGRS" - AccountTypePremiumLRS AccountType = "Premium_LRS" -) - -type ExtendedPropertyList struct { - ExtendedProperty []ExtendedProperty -} - -type ExtendedProperty struct { - Name string - Value string -} - -type AvailabilityResponse struct { - XMLName xml.Name `xml:"AvailabilityResponse"` - Xmlns string `xml:"xmlns,attr"` - Result bool - Reason string -} diff --git a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/util.go b/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/util.go deleted file mode 100644 index 72601757..00000000 --- a/mantle/vendor/github.com/Azure/azure-sdk-for-go/management/util.go +++ /dev/null @@ -1,11 +0,0 @@ -package management - -import ( - "io/ioutil" - "net/http" -) - -func getResponseBody(response *http.Response) ([]byte, error) { - defer response.Body.Close() - return ioutil.ReadAll(response.Body) -} diff --git a/mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.mod b/mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.mod deleted file mode 100644 index 0747b77d..00000000 --- a/mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/IBM-Cloud/bluemix-go - -go 1.13 - -require ( - github.com/ghodss/yaml v1.0.0 - github.com/go-openapi/strfmt v0.20.0 - github.com/onsi/ginkgo v1.15.0 - 
github.com/onsi/gomega v1.10.5 - golang.org/x/net v0.0.0-20210119194325-5f4716e94777 - gopkg.in/yaml.v2 v2.4.0 -) diff --git a/mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.sum b/mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.sum deleted file mode 100644 index 52e4f69d..00000000 --- a/mantle/vendor/github.com/IBM-Cloud/bluemix-go/go.sum +++ /dev/null @@ -1,186 +0,0 @@ -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-openapi/errors v0.19.8 h1:doM+tQdZbUm9gydV9yR+iQNmztbjj7I3sW4sIcAwIzc= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 
h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod 
h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.15.0 h1:1V1NfVQR87RtWAgp1lv9JZJ5Jap+XFGKPi00andXGi4= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.mongodb.org/mongo-driver v1.4.3 h1:moga+uhicpVshTyaqY9L23E6QqwcHRUv1sqyOsoyOO8= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777 h1:003p0dJM77cxMSyCPFphvZf/Y5/NXf5fzg6ufd1/Oew= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools 
v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/mantle/vendor/github.com/clarketm/json/decode.go b/mantle/vendor/github.com/clarketm/json/decode.go index b4348469..a9917e72 100644 --- a/mantle/vendor/github.com/clarketm/json/decode.go +++ b/mantle/vendor/github.com/clarketm/json/decode.go @@ -200,22 +200,22 @@ func (n Number) Int64() (int64, error) { return strconv.ParseInt(string(n), 10, 64) } +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + // decodeState represents the state while decoding a JSON value. type decodeState struct { - data []byte - off int // next read offset in data - opcode int // last read result - scan scanner - errorContext struct { // provides context for type errors - Struct reflect.Type - FieldStack []string - } + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext savedError error useNumber bool disallowUnknownFields bool - // safeUnquote is the number of current string literal bytes that don't - // need to be unquoted. When negative, no bytes need unquoting. 
- safeUnquote int } // readIndex returns the position of the last byte read. @@ -232,10 +232,11 @@ func (d *decodeState) init(data []byte) *decodeState { d.data = data d.off = 0 d.savedError = nil - d.errorContext.Struct = nil - - // Reuse the allocated space for the FieldStack slice. - d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } return d } @@ -249,12 +250,11 @@ func (d *decodeState) saveError(err error) { // addErrorContext returns a new error enhanced with information from d.errorContext func (d *decodeState) addErrorContext(err error) error { - if d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0 { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() err.Field = strings.Join(d.errorContext.FieldStack, ".") - return err } } return err @@ -317,27 +317,13 @@ func (d *decodeState) rescanLiteral() { Switch: switch data[i-1] { case '"': // string - // safeUnquote is initialized at -1, which means that all bytes - // checked so far can be unquoted at a later time with no work - // at all. When reaching the closing '"', if safeUnquote is - // still -1, all bytes can be unquoted with no work. Otherwise, - // only those bytes up until the first '\\' or non-ascii rune - // can be safely unquoted. 
- safeUnquote := -1 for ; i < len(data); i++ { - if c := data[i]; c == '\\' { - if safeUnquote < 0 { // first unsafe byte - safeUnquote = int(i - d.off) - } + switch data[i] { + case '\\': i++ // escaped char - } else if c == '"' { - d.safeUnquote = safeUnquote + case '"': i++ // tokenize the closing quote too break Switch - } else if c >= utf8.RuneSelf { - if safeUnquote < 0 { // first unsafe byte - safeUnquote = int(i - d.off) - } } } case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number @@ -674,7 +660,10 @@ func (d *decodeState) object(v reflect.Value) error { } var mapElem reflect.Value - origErrorContext := d.errorContext + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } for { // Read opening " of string key or closing }. @@ -691,7 +680,7 @@ func (d *decodeState) object(v reflect.Value) error { start := d.readIndex() d.rescanLiteral() item := d.data[start:d.readIndex()] - key, ok := d.unquoteBytes(item) + key, ok := unquoteBytes(item) if !ok { panic(phasePanicMsg) } @@ -749,6 +738,9 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Field(i) } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t } else if d.disallowUnknownFields { @@ -829,11 +821,13 @@ func (d *decodeState) object(v reflect.Value) error { if d.opcode == scanSkipSpace { d.scanWhile(scanSkipSpace) } - // Reset errorContext to its original state. - // Keep the same underlying array for FieldStack, to reuse the - // space and avoid unnecessary allocs. - d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] - d.errorContext.Struct = origErrorContext.Struct + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. 
+ d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } if d.opcode == scanEndObject { break } @@ -892,7 +886,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) return nil } - s, ok := d.unquoteBytes(item) + s, ok := unquoteBytes(item) if !ok { if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) @@ -943,7 +937,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } case '"': // string - s, ok := d.unquoteBytes(item) + s, ok := unquoteBytes(item) if !ok { if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) @@ -1103,7 +1097,7 @@ func (d *decodeState) objectInterface() map[string]interface{} { start := d.readIndex() d.rescanLiteral() item := d.data[start:d.readIndex()] - key, ok := d.unquote(item) + key, ok := unquote(item) if !ok { panic(phasePanicMsg) } @@ -1152,7 +1146,7 @@ func (d *decodeState) literalInterface() interface{} { return c == 't' case '"': // string - s, ok := d.unquote(item) + s, ok := unquote(item) if !ok { panic(phasePanicMsg) } @@ -1195,26 +1189,38 @@ func getu4(s []byte) rune { // unquote converts a quoted JSON string literal s into an actual string t. // The rules are different than for Go, so cannot use strconv.Unquote. -// The first byte in s must be '"'. -func (d *decodeState) unquote(s []byte) (t string, ok bool) { - s, ok = d.unquoteBytes(s) +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) t = string(s) return } -func (d *decodeState) unquoteBytes(s []byte) (t []byte, ok bool) { - // We already know that s[0] == '"'. 
However, we don't know that the - // closing quote exists in all cases, such as when the string is nested - // via the ",string" option. - if len(s) < 2 || s[len(s)-1] != '"' { +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { return } s = s[1 : len(s)-1] - // If there are no unusual characters, no unquoting is needed, so return - // a slice of the original bytes. - r := d.safeUnquote - if r == -1 { + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { return s, true } diff --git a/mantle/vendor/github.com/clarketm/json/encode.go b/mantle/vendor/github.com/clarketm/json/encode.go index 1b45610a..06b2f754 100644 --- a/mantle/vendor/github.com/clarketm/json/encode.go +++ b/mantle/vendor/github.com/clarketm/json/encode.go @@ -153,7 +153,7 @@ import ( // // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. +// an error. // func Marshal(v interface{}) ([]byte, error) { e := newEncodeState() @@ -236,6 +236,8 @@ func (e *UnsupportedTypeError) Error() string { return "json: unsupported type: " + e.Type.String() } +// An UnsupportedValueError is returned by Marshal when attempting +// to encode an unsupported value. type UnsupportedValueError struct { Value reflect.Value Str string @@ -285,17 +287,31 @@ var hex = "0123456789abcdef" type encodeState struct { bytes.Buffer // accumulated output scratch [64]byte + + // Keep track of what pointers we've seen in the current recursive call + // path, to avoid cycles that could lead to a stack overflow. 
Only do + // the relatively expensive map operations if ptrLevel is larger than + // startDetectingCyclesAfter, so that we skip the work if we're within a + // reasonable amount of nested pointers deep. + ptrLevel uint + ptrSeen map[interface{}]struct{} } +const startDetectingCyclesAfter = 1000 + var encodeStatePool sync.Pool func newEncodeState() *encodeState { if v := encodeStatePool.Get(); v != nil { e := v.(*encodeState) e.Reset() + if len(e.ptrSeen) > 0 { + panic("ptrEncoder.encode should have emptied ptrSeen via defers") + } + e.ptrLevel = 0 return e } - return new(encodeState) + return &encodeState{ptrSeen: make(map[interface{}]struct{})} } // jsonError is an error wrapper type for internal use only. @@ -632,11 +648,12 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) { return } if opts.quoted { - b := make([]byte, 0, v.Len()+2) - b = append(b, '"') - b = append(b, []byte(v.String())...) - b = append(b, '"') - e.stringBytes(b, opts.escapeHTML) + e2 := newEncodeState() + // Since we encode the string twice, we only need to escape HTML + // the first time. + e2.string(v.String(), opts.escapeHTML) + e.stringBytes(e2.Bytes(), false) + encodeStatePool.Put(e2) } else { e.string(v.String(), opts.escapeHTML) } @@ -646,7 +663,7 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) { func isValidNumber(s string) bool { // This function implements the JSON numbers grammar. // See https://tools.ietf.org/html/rfc7159#section-6 - // and https://json.org/number.gif + // and https://www.json.org/img/number.png if s == "" { return false @@ -775,28 +792,40 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. 
+ ptr := v.Pointer() + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } e.WriteByte('{') // Extract and sort the keys. - keys := v.MapKeys() - sv := make([]reflectWithString, len(keys)) - for i, v := range keys { - sv[i].v = v + sv := make([]reflectWithString, v.Len()) + mi := v.MapRange() + for i := 0; mi.Next(); i++ { + sv[i].k = mi.Key() + sv[i].v = mi.Value() if err := sv[i].resolve(); err != nil { e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error())) } } - sort.Slice(sv, func(i, j int) bool { return sv[i].s < sv[j].s }) + sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks }) for i, kv := range sv { if i > 0 { e.WriteByte(',') } - e.string(kv.s, opts.escapeHTML) + e.string(kv.ks, opts.escapeHTML) e.WriteByte(':') - me.elemEnc(e, v.MapIndex(kv.v), opts) + me.elemEnc(e, kv.v, opts) } e.WriteByte('}') + e.ptrLevel-- } func newMapEncoder(t reflect.Type) encoderFunc { @@ -853,7 +882,23 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + // Here we use a struct to memorize the pointer to the first element of the slice + // and its length. 
+ ptr := struct { + ptr uintptr + len int + }{v.Pointer(), v.Len()} + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } se.arrayEnc(e, v, opts) + e.ptrLevel-- } func newSliceEncoder(t reflect.Type) encoderFunc { @@ -898,7 +943,18 @@ func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + ptr := v.Interface() + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } pe.elemEnc(e, v.Elem(), opts) + e.ptrLevel-- } func newPtrEncoder(t reflect.Type) encoderFunc { @@ -931,7 +987,7 @@ func isValidTag(s string) bool { } for _, c := range s { switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. 
@@ -953,29 +1009,30 @@ func typeByIndex(t reflect.Type, index []int) reflect.Type { } type reflectWithString struct { - v reflect.Value - s string + k reflect.Value + v reflect.Value + ks string } func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() + if w.k.Kind() == reflect.String { + w.ks = w.k.String() return nil } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - if w.v.Kind() == reflect.Ptr && w.v.IsNil() { + if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { + if w.k.Kind() == reflect.Ptr && w.k.IsNil() { return nil } buf, err := tm.MarshalText() - w.s = string(buf) + w.ks = string(buf) return err } - switch w.v.Kind() { + switch w.k.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) + w.ks = strconv.FormatInt(w.k.Int(), 10) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) + w.ks = strconv.FormatUint(w.k.Uint(), 10) return nil } panic("unexpected map key type") @@ -1195,19 +1252,18 @@ func typeFields(t reflect.Type) structFields { // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) - isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type if t.Kind() == reflect.Ptr { t = t.Elem() } - if isUnexported && t.Kind() != reflect.Struct { + if !sf.IsExported() && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. - } else if isUnexported { + } else if !sf.IsExported() { // Ignore unexported non-embedded fields. 
continue } diff --git a/mantle/vendor/github.com/clarketm/json/fuzz.go b/mantle/vendor/github.com/clarketm/json/fuzz.go index be03f0d7..d3fa2d11 100644 --- a/mantle/vendor/github.com/clarketm/json/fuzz.go +++ b/mantle/vendor/github.com/clarketm/json/fuzz.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gofuzz // +build gofuzz package json diff --git a/mantle/vendor/github.com/clarketm/json/scanner.go b/mantle/vendor/github.com/clarketm/json/scanner.go index 552bd703..9dc1903e 100644 --- a/mantle/vendor/github.com/clarketm/json/scanner.go +++ b/mantle/vendor/github.com/clarketm/json/scanner.go @@ -139,6 +139,10 @@ const ( parseArrayValue // parsing array value ) +// This limits the max nesting depth to prevent stack overflow. +// This is permitted by https://tools.ietf.org/html/rfc7159#section-9 +const maxNestingDepth = 10000 + // reset prepares the scanner for use. // It must be called before calling s.step. func (s *scanner) reset() { @@ -168,8 +172,13 @@ func (s *scanner) eof() int { } // pushParseState pushes a new parse state p onto the parse stack. -func (s *scanner) pushParseState(p int) { - s.parseState = append(s.parseState, p) +// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned. +func (s *scanner) pushParseState(c byte, newParseState int, successState int) int { + s.parseState = append(s.parseState, newParseState) + if len(s.parseState) <= maxNestingDepth { + return successState + } + return s.error(c, "exceeded max depth") } // popParseState pops a parse state (already obtained) off the stack @@ -186,12 +195,12 @@ func (s *scanner) popParseState() { } func isSpace(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' + return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') } // stateBeginValueOrEmpty is the state after reading `[`. 
func stateBeginValueOrEmpty(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } if c == ']' { @@ -202,18 +211,16 @@ func stateBeginValueOrEmpty(s *scanner, c byte) int { // stateBeginValue is the state at the beginning of the input. func stateBeginValue(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } switch c { case '{': s.step = stateBeginStringOrEmpty - s.pushParseState(parseObjectKey) - return scanBeginObject + return s.pushParseState(c, parseObjectKey, scanBeginObject) case '[': s.step = stateBeginValueOrEmpty - s.pushParseState(parseArrayValue) - return scanBeginArray + return s.pushParseState(c, parseArrayValue, scanBeginArray) case '"': s.step = stateInString return scanBeginLiteral @@ -242,7 +249,7 @@ func stateBeginValue(s *scanner, c byte) int { // stateBeginStringOrEmpty is the state after reading `{`. func stateBeginStringOrEmpty(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } if c == '}' { @@ -255,7 +262,7 @@ func stateBeginStringOrEmpty(s *scanner, c byte) int { // stateBeginString is the state after reading `{"key": value,`. 
func stateBeginString(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } if c == '"' { @@ -275,7 +282,7 @@ func stateEndValue(s *scanner, c byte) int { s.endTop = true return stateEndTop(s, c) } - if c <= ' ' && isSpace(c) { + if isSpace(c) { s.step = stateEndValue return scanSkipSpace } diff --git a/mantle/vendor/github.com/coreos/butane/base/util/file.go b/mantle/vendor/github.com/coreos/butane/base/util/file.go index a6f91680..73e24d4c 100644 --- a/mantle/vendor/github.com/coreos/butane/base/util/file.go +++ b/mantle/vendor/github.com/coreos/butane/base/util/file.go @@ -36,8 +36,8 @@ func EnsurePathWithinFilesDir(path, filesDir string) error { return nil } -/// CheckForDecimalMode fails if the specified mode appears to have been -/// incorrectly specified in decimal instead of octal. +// CheckForDecimalMode fails if the specified mode appears to have been +// incorrectly specified in decimal instead of octal. func CheckForDecimalMode(mode int, directory bool) error { correctedMode, ok := decimalModeToOctal(mode) if !ok { @@ -49,9 +49,9 @@ func CheckForDecimalMode(mode int, directory bool) error { return nil } -/// isTypicalMode returns true if the specified mode is unsurprising. -/// It returns false for some modes that are unusual but valid in limited -/// cases. +// isTypicalMode returns true if the specified mode is unsurprising. +// It returns false for some modes that are unusual but valid in limited +// cases. func isTypicalMode(mode int, directory bool) bool { // no permissions is always reasonable (root ignores mode bits) if mode == 0 { @@ -126,8 +126,8 @@ func isTypicalMode(mode int, directory bool) bool { return true } -/// decimalModeToOctal takes a mode written in decimal and converts it to -/// octal, returning (0, false) on failure. +// decimalModeToOctal takes a mode written in decimal and converts it to +// octal, returning (0, false) on failure. 
func decimalModeToOctal(mode int) (int, bool) { if mode < 0 || mode > 7777 { // out of range diff --git a/mantle/vendor/github.com/coreos/butane/base/util/url.go b/mantle/vendor/github.com/coreos/butane/base/util/url.go index 2cf3bef0..b7bc0359 100644 --- a/mantle/vendor/github.com/coreos/butane/base/util/url.go +++ b/mantle/vendor/github.com/coreos/butane/base/util/url.go @@ -24,9 +24,24 @@ import ( "github.com/vincent-petithory/dataurl" ) -func MakeDataURL(contents []byte, currentCompression *string, allowCompression bool) (uri string, gzipped bool, err error) { +func MakeDataURL(contents []byte, currentCompression *string, allowCompression bool) (uri string, compression *string, err error) { // try three different encodings, and select the smallest one + if util.NilOrEmpty(currentCompression) { + // The config does not specify compression. We need to + // explicitly set the compression field to avoid a child + // config inheriting a compression setting from the parent, + // which may not have used the same compression algorithm. + compression = util.StrToPtr("") + } else { + // The config specifies compression, meaning that the + // contents were compressed by the user, so we can pick a + // data URL encoding but we can't compress again. Return a + // nil compression value so the caller knows not to record a + // translation from input contents to output compression. + compression = nil + } + // URL-escaped, useful for ASCII text opaque := "," + dataurl.Escape(contents) @@ -53,10 +68,10 @@ func MakeDataURL(contents []byte, currentCompression *string, allowCompression b return } gz := ";base64," + base64.StdEncoding.EncodeToString(buf.Bytes()) - // Account for space needed by "compression": "gzip". 
- if len(gz)+25 < len(opaque) { + // Account for space needed by the compression value + if len(gz)+len("gzip") < len(opaque) { opaque = gz - gzipped = true + compression = util.StrToPtr("gzip") } } diff --git a/mantle/vendor/github.com/coreos/butane/base/v0_2/translate.go b/mantle/vendor/github.com/coreos/butane/base/v0_2/translate.go index 9199861e..3d05bd0e 100644 --- a/mantle/vendor/github.com/coreos/butane/base/v0_2/translate.go +++ b/mantle/vendor/github.com/coreos/butane/base/v0_2/translate.go @@ -15,8 +15,8 @@ package v0_2 import ( - "io/ioutil" "os" + slashpath "path" "path/filepath" "strings" "text/template" @@ -25,7 +25,7 @@ import ( "github.com/coreos/butane/config/common" "github.com/coreos/butane/translate" - "github.com/coreos/go-systemd/unit" + "github.com/coreos/go-systemd/v22/unit" "github.com/coreos/ignition/v2/config/util" "github.com/coreos/ignition/v2/config/v3_1/types" "github.com/coreos/vcontext/path" @@ -127,27 +127,27 @@ func translateResource(from Resource, options common.TranslateOptions) (to types // calculate file path within FilesDir and check for // path traversal - filePath := filepath.Join(options.FilesDir, *from.Local) + filePath := filepath.Join(options.FilesDir, filepath.FromSlash(*from.Local)) if err := baseutil.EnsurePathWithinFilesDir(filePath, options.FilesDir); err != nil { r.AddOnError(c, err) return } - contents, err := ioutil.ReadFile(filePath) + contents, err := os.ReadFile(filePath) if err != nil { r.AddOnError(c, err) return } - src, gzipped, err := baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } 
@@ -155,15 +155,15 @@ func translateResource(from Resource, options common.TranslateOptions) (to types if from.Inline != nil { c := path.New("yaml", "inline") - src, gzipped, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } @@ -208,7 +208,7 @@ func (c Config) processTrees(ret *types.Config, options common.TranslateOptions) // calculate base path within FilesDir and check for // path traversal - srcBaseDir := filepath.Join(options.FilesDir, tree.Local) + srcBaseDir := filepath.Join(options.FilesDir, filepath.FromSlash(tree.Local)) if err := baseutil.EnsurePathWithinFilesDir(srcBaseDir, options.FilesDir); err != nil { r.AddOnError(yamlPath, err) continue @@ -246,7 +246,7 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report r.AddOnError(yamlPath, err) return nil } - destPath := filepath.Join(destBaseDir, relPath) + destPath := slashpath.Join(destBaseDir, filepath.ToSlash(relPath)) if info.Mode().IsDir() { return nil @@ -272,20 +272,20 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report ts.AddTranslation(yamlPath, path.New("json", "storage", "files")) } } - contents, err := ioutil.ReadFile(srcPath) + contents, err := os.ReadFile(srcPath) if err != nil { r.AddOnError(yamlPath, err) return nil } - url, gzipped, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) + url, compression, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) if err != nil { 
r.AddOnError(yamlPath, err) return nil } - file.Contents.Source = util.StrToPtr(url) + file.Contents.Source = &url ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents", "source")) - if gzipped { - file.Contents.Compression = util.StrToPtr("gzip") + if compression != nil { + file.Contents.Compression = compression ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents", "compression")) } ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents")) @@ -319,11 +319,12 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report ts.AddTranslation(yamlPath, path.New("json", "storage", "links")) } } - link.Target, err = os.Readlink(srcPath) + target, err := os.Readlink(srcPath) if err != nil { r.AddOnError(yamlPath, err) return nil } + link.Target = filepath.ToSlash(target) ts.AddTranslation(yamlPath, path.New("json", "storage", "links", i, "target")) } else { r.AddOnError(yamlPath, common.ErrFileType) diff --git a/mantle/vendor/github.com/coreos/butane/base/v0_3/translate.go b/mantle/vendor/github.com/coreos/butane/base/v0_3/translate.go index cf876342..35dec7f7 100644 --- a/mantle/vendor/github.com/coreos/butane/base/v0_3/translate.go +++ b/mantle/vendor/github.com/coreos/butane/base/v0_3/translate.go @@ -16,8 +16,8 @@ package v0_3 import ( "fmt" - "io/ioutil" "os" + slashpath "path" "path/filepath" "strings" "text/template" @@ -26,7 +26,7 @@ import ( "github.com/coreos/butane/config/common" "github.com/coreos/butane/translate" - "github.com/coreos/go-systemd/unit" + "github.com/coreos/go-systemd/v22/unit" "github.com/coreos/ignition/v2/config/util" "github.com/coreos/ignition/v2/config/v3_2/types" "github.com/coreos/vcontext/path" @@ -138,27 +138,27 @@ func translateResource(from Resource, options common.TranslateOptions) (to types // calculate file path within FilesDir and check for // path traversal - filePath := filepath.Join(options.FilesDir, *from.Local) + filePath := 
filepath.Join(options.FilesDir, filepath.FromSlash(*from.Local)) if err := baseutil.EnsurePathWithinFilesDir(filePath, options.FilesDir); err != nil { r.AddOnError(c, err) return } - contents, err := ioutil.ReadFile(filePath) + contents, err := os.ReadFile(filePath) if err != nil { r.AddOnError(c, err) return } - src, gzipped, err := baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } @@ -166,15 +166,15 @@ func translateResource(from Resource, options common.TranslateOptions) (to types if from.Inline != nil { c := path.New("yaml", "inline") - src, gzipped, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } @@ -219,7 +219,7 @@ func (c Config) processTrees(ret *types.Config, options common.TranslateOptions) // calculate base path within FilesDir and check for // path traversal - srcBaseDir := filepath.Join(options.FilesDir, tree.Local) + srcBaseDir := filepath.Join(options.FilesDir, filepath.FromSlash(tree.Local)) if err := baseutil.EnsurePathWithinFilesDir(srcBaseDir, options.FilesDir); err != nil { r.AddOnError(yamlPath, err) continue @@ -257,7 +257,7 @@ func walkTree(yamlPath path.ContextPath, ts 
*translate.TranslationSet, r *report r.AddOnError(yamlPath, err) return nil } - destPath := filepath.Join(destBaseDir, relPath) + destPath := slashpath.Join(destBaseDir, filepath.ToSlash(relPath)) if info.Mode().IsDir() { return nil @@ -283,20 +283,20 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report ts.AddTranslation(yamlPath, path.New("json", "storage", "files")) } } - contents, err := ioutil.ReadFile(srcPath) + contents, err := os.ReadFile(srcPath) if err != nil { r.AddOnError(yamlPath, err) return nil } - url, gzipped, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) + url, compression, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(yamlPath, err) return nil } - file.Contents.Source = util.StrToPtr(url) + file.Contents.Source = &url ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents", "source")) - if gzipped { - file.Contents.Compression = util.StrToPtr("gzip") + if compression != nil { + file.Contents.Compression = compression ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents", "compression")) } ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents")) @@ -330,11 +330,12 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report ts.AddTranslation(yamlPath, path.New("json", "storage", "links")) } } - link.Target, err = os.Readlink(srcPath) + target, err := os.Readlink(srcPath) if err != nil { r.AddOnError(yamlPath, err) return nil } + link.Target = filepath.ToSlash(target) ts.AddTranslation(yamlPath, path.New("json", "storage", "links", i, "target")) } else { r.AddOnError(yamlPath, common.ErrFileType) diff --git a/mantle/vendor/github.com/coreos/butane/base/v0_4/translate.go b/mantle/vendor/github.com/coreos/butane/base/v0_4/translate.go index b17492bd..9e2caa85 100644 --- 
a/mantle/vendor/github.com/coreos/butane/base/v0_4/translate.go +++ b/mantle/vendor/github.com/coreos/butane/base/v0_4/translate.go @@ -16,8 +16,8 @@ package v0_4 import ( "fmt" - "io/ioutil" "os" + slashpath "path" "path/filepath" "strings" "text/template" @@ -26,7 +26,7 @@ import ( "github.com/coreos/butane/config/common" "github.com/coreos/butane/translate" - "github.com/coreos/go-systemd/unit" + "github.com/coreos/go-systemd/v22/unit" "github.com/coreos/ignition/v2/config/util" "github.com/coreos/ignition/v2/config/v3_3/types" "github.com/coreos/vcontext/path" @@ -153,27 +153,27 @@ func translateResource(from Resource, options common.TranslateOptions) (to types // calculate file path within FilesDir and check for // path traversal - filePath := filepath.Join(options.FilesDir, *from.Local) + filePath := filepath.Join(options.FilesDir, filepath.FromSlash(*from.Local)) if err := baseutil.EnsurePathWithinFilesDir(filePath, options.FilesDir); err != nil { r.AddOnError(c, err) return } - contents, err := ioutil.ReadFile(filePath) + contents, err := os.ReadFile(filePath) if err != nil { r.AddOnError(c, err) return } - src, gzipped, err := baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } @@ -181,15 +181,15 @@ func translateResource(from Resource, options common.TranslateOptions) (to types if from.Inline != nil { c := path.New("yaml", "inline") - src, gzipped, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, 
!options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } @@ -234,7 +234,7 @@ func (c Config) processTrees(ret *types.Config, options common.TranslateOptions) // calculate base path within FilesDir and check for // path traversal - srcBaseDir := filepath.Join(options.FilesDir, tree.Local) + srcBaseDir := filepath.Join(options.FilesDir, filepath.FromSlash(tree.Local)) if err := baseutil.EnsurePathWithinFilesDir(srcBaseDir, options.FilesDir); err != nil { r.AddOnError(yamlPath, err) continue @@ -272,7 +272,7 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report r.AddOnError(yamlPath, err) return nil } - destPath := filepath.Join(destBaseDir, relPath) + destPath := slashpath.Join(destBaseDir, filepath.ToSlash(relPath)) if info.Mode().IsDir() { return nil @@ -298,20 +298,20 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report ts.AddTranslation(yamlPath, path.New("json", "storage", "files")) } } - contents, err := ioutil.ReadFile(srcPath) + contents, err := os.ReadFile(srcPath) if err != nil { r.AddOnError(yamlPath, err) return nil } - url, gzipped, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) + url, compression, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(yamlPath, err) return nil } - file.Contents.Source = util.StrToPtr(url) + file.Contents.Source = &url ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents", "source")) - if gzipped { - file.Contents.Compression = util.StrToPtr("gzip") + if compression != nil { + file.Contents.Compression = compression ts.AddTranslation(yamlPath, 
path.New("json", "storage", "files", i, "contents", "compression")) } ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents")) @@ -350,7 +350,7 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report r.AddOnError(yamlPath, err) return nil } - link.Target = &target + link.Target = util.StrToPtr(filepath.ToSlash(target)) ts.AddTranslation(yamlPath, path.New("json", "storage", "links", i, "target")) } else { r.AddOnError(yamlPath, common.ErrFileType) diff --git a/mantle/vendor/github.com/coreos/butane/base/v0_5_exp/translate.go b/mantle/vendor/github.com/coreos/butane/base/v0_5_exp/translate.go index 960cd9e1..d53aa05d 100644 --- a/mantle/vendor/github.com/coreos/butane/base/v0_5_exp/translate.go +++ b/mantle/vendor/github.com/coreos/butane/base/v0_5_exp/translate.go @@ -16,8 +16,8 @@ package v0_5_exp import ( "fmt" - "io/ioutil" "os" + slashpath "path" "path/filepath" "strings" "text/template" @@ -26,7 +26,7 @@ import ( "github.com/coreos/butane/config/common" "github.com/coreos/butane/translate" - "github.com/coreos/go-systemd/unit" + "github.com/coreos/go-systemd/v22/unit" "github.com/coreos/ignition/v2/config/util" "github.com/coreos/ignition/v2/config/v3_4_experimental/types" "github.com/coreos/vcontext/path" @@ -153,27 +153,27 @@ func translateResource(from Resource, options common.TranslateOptions) (to types // calculate file path within FilesDir and check for // path traversal - filePath := filepath.Join(options.FilesDir, *from.Local) + filePath := filepath.Join(options.FilesDir, filepath.FromSlash(*from.Local)) if err := baseutil.EnsurePathWithinFilesDir(filePath, options.FilesDir); err != nil { r.AddOnError(c, err) return } - contents, err := ioutil.ReadFile(filePath) + contents, err := os.ReadFile(filePath) if err != nil { r.AddOnError(c, err) return } - src, gzipped, err := baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) + src, compression, err := 
baseutil.MakeDataURL(contents, to.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } @@ -181,15 +181,15 @@ func translateResource(from Resource, options common.TranslateOptions) (to types if from.Inline != nil { c := path.New("yaml", "inline") - src, gzipped, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL([]byte(*from.Inline), to.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(c, err) return } to.Source = &src tm.AddTranslation(c, path.New("json", "source")) - if gzipped { - to.Compression = util.StrToPtr("gzip") + if compression != nil { + to.Compression = compression tm.AddTranslation(c, path.New("json", "compression")) } } @@ -234,7 +234,7 @@ func (c Config) processTrees(ret *types.Config, options common.TranslateOptions) // calculate base path within FilesDir and check for // path traversal - srcBaseDir := filepath.Join(options.FilesDir, tree.Local) + srcBaseDir := filepath.Join(options.FilesDir, filepath.FromSlash(tree.Local)) if err := baseutil.EnsurePathWithinFilesDir(srcBaseDir, options.FilesDir); err != nil { r.AddOnError(yamlPath, err) continue @@ -272,7 +272,7 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report r.AddOnError(yamlPath, err) return nil } - destPath := filepath.Join(destBaseDir, relPath) + destPath := slashpath.Join(destBaseDir, filepath.ToSlash(relPath)) if info.Mode().IsDir() { return nil @@ -298,20 +298,20 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report ts.AddTranslation(yamlPath, path.New("json", "storage", "files")) } } - contents, err := ioutil.ReadFile(srcPath) 
+ contents, err := os.ReadFile(srcPath) if err != nil { r.AddOnError(yamlPath, err) return nil } - url, gzipped, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) + url, compression, err := baseutil.MakeDataURL(contents, file.Contents.Compression, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(yamlPath, err) return nil } - file.Contents.Source = util.StrToPtr(url) + file.Contents.Source = &url ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents", "source")) - if gzipped { - file.Contents.Compression = util.StrToPtr("gzip") + if compression != nil { + file.Contents.Compression = compression ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents", "compression")) } ts.AddTranslation(yamlPath, path.New("json", "storage", "files", i, "contents")) @@ -350,7 +350,7 @@ func walkTree(yamlPath path.ContextPath, ts *translate.TranslationSet, r *report r.AddOnError(yamlPath, err) return nil } - link.Target = &target + link.Target = util.StrToPtr(filepath.ToSlash(target)) ts.AddTranslation(yamlPath, path.New("json", "storage", "links", i, "target")) } else { r.AddOnError(yamlPath, common.ErrFileType) diff --git a/mantle/vendor/github.com/coreos/butane/config/common/errors.go b/mantle/vendor/github.com/coreos/butane/config/common/errors.go index 38313835..1961f007 100644 --- a/mantle/vendor/github.com/coreos/butane/config/common/errors.go +++ b/mantle/vendor/github.com/coreos/butane/config/common/errors.go @@ -64,12 +64,20 @@ var ( ErrFileSchemeSupport = errors.New("file contents source must be data URL in this spec version") ErrFileAppendSupport = errors.New("appending to files is not supported in this spec version") ErrFileCompressionSupport = errors.New("file compression is not supported in this spec version") + ErrFileSpecialModeSupport = errors.New("special mode bits are not supported in this spec version") ErrLinkSupport = errors.New("links are not 
supported in this spec version") ErrGroupSupport = errors.New("groups are not supported in this spec version") ErrUserFieldSupport = errors.New("fields other than \"name\" and \"ssh_authorized_keys\" are not supported in this spec version") ErrUserNameSupport = errors.New("users other than \"core\" are not supported in this spec version") ErrKernelArgumentSupport = errors.New("this field cannot be used for kernel arguments in this spec version; use openshift.kernel_arguments instead") + // Storage + ErrClevisSupport = errors.New("clevis is not supported in this spec version") + // Extensions ErrExtensionNameRequired = errors.New("field \"name\" is required") + + // Grub + ErrGrubUserNameNotSpecified = errors.New("field \"name\" is required") + ErrGrubPasswordNotSpecified = errors.New("field \"password_hash\" is required") ) diff --git a/mantle/vendor/github.com/coreos/butane/config/config.go b/mantle/vendor/github.com/coreos/butane/config/config.go index b71bcf4f..61719477 100644 --- a/mantle/vendor/github.com/coreos/butane/config/config.go +++ b/mantle/vendor/github.com/coreos/butane/config/config.go @@ -24,8 +24,12 @@ import ( fcos1_3 "github.com/coreos/butane/config/fcos/v1_3" fcos1_4 "github.com/coreos/butane/config/fcos/v1_4" fcos1_5_exp "github.com/coreos/butane/config/fcos/v1_5_exp" + flatcar1_0 "github.com/coreos/butane/config/flatcar/v1_0" + flatcar1_1_exp "github.com/coreos/butane/config/flatcar/v1_1_exp" openshift4_10 "github.com/coreos/butane/config/openshift/v4_10" - openshift4_11_exp "github.com/coreos/butane/config/openshift/v4_11_exp" + openshift4_11 "github.com/coreos/butane/config/openshift/v4_11" + openshift4_12 "github.com/coreos/butane/config/openshift/v4_12" + openshift4_13_exp "github.com/coreos/butane/config/openshift/v4_13_exp" openshift4_8 "github.com/coreos/butane/config/openshift/v4_8" openshift4_9 "github.com/coreos/butane/config/openshift/v4_9" rhcos0_1 "github.com/coreos/butane/config/rhcos/v0_1" @@ -39,7 +43,7 @@ var ( registry = 
map[string]translator{} ) -/// Fields that must be included in the root struct of every spec version. +// Fields that must be included in the root struct of every spec version. type commonFields struct { Version string `yaml:"version"` Variant string `yaml:"variant"` @@ -52,16 +56,20 @@ func init() { RegisterTranslator("fcos", "1.3.0", fcos1_3.ToIgn3_2Bytes) RegisterTranslator("fcos", "1.4.0", fcos1_4.ToIgn3_3Bytes) RegisterTranslator("fcos", "1.5.0-experimental", fcos1_5_exp.ToIgn3_4Bytes) + RegisterTranslator("flatcar", "1.0.0", flatcar1_0.ToIgn3_3Bytes) + RegisterTranslator("flatcar", "1.1.0-experimental", flatcar1_1_exp.ToIgn3_4Bytes) RegisterTranslator("openshift", "4.8.0", openshift4_8.ToConfigBytes) RegisterTranslator("openshift", "4.9.0", openshift4_9.ToConfigBytes) RegisterTranslator("openshift", "4.10.0", openshift4_10.ToConfigBytes) - RegisterTranslator("openshift", "4.11.0-experimental", openshift4_11_exp.ToConfigBytes) + RegisterTranslator("openshift", "4.11.0", openshift4_11.ToConfigBytes) + RegisterTranslator("openshift", "4.12.0", openshift4_12.ToConfigBytes) + RegisterTranslator("openshift", "4.13.0-experimental", openshift4_13_exp.ToConfigBytes) RegisterTranslator("rhcos", "0.1.0", rhcos0_1.ToIgn3_2Bytes) } -/// RegisterTranslator registers a translator for the specified variant and -/// version to be available for use by TranslateBytes. This is only needed -/// by users implementing their own translators outside the Butane package. +// RegisterTranslator registers a translator for the specified variant and +// version to be available for use by TranslateBytes. This is only needed +// by users implementing their own translators outside the Butane package. 
func RegisterTranslator(variant, version string, trans translator) { key := fmt.Sprintf("%s+%s", variant, version) if _, ok := registry[key]; ok { diff --git a/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/schema.go b/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/schema.go index fc174dac..d985413d 100644 --- a/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/schema.go +++ b/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/schema.go @@ -22,6 +22,7 @@ type Config struct { base.Config `yaml:",inline"` BootDevice BootDevice `yaml:"boot_device"` Extensions []Extension `yaml:"extensions"` + Grub Grub `yaml:"grub"` } type BootDevice struct { @@ -43,3 +44,12 @@ type BootDeviceMirror struct { type Extension struct { Name string `yaml:"name"` } + +type Grub struct { + Users []GrubUser `yaml:"users"` +} + +type GrubUser struct { + Name string `yaml:"name"` + PasswordHash *string `yaml:"password_hash"` +} diff --git a/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/translate.go b/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/translate.go index c941af28..f2eec2fd 100644 --- a/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/translate.go +++ b/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/translate.go @@ -18,6 +18,7 @@ import ( "crypto/sha256" "encoding/hex" "fmt" + "strings" baseutil "github.com/coreos/butane/base/util" "github.com/coreos/butane/config/common" @@ -86,6 +87,11 @@ func (c Config) ToIgn3_4Unvalidated(options common.TranslateOptions) (types.Conf retConfig, ts := baseutil.MergeTranslatedConfigs(retp, tsp, ret, ts) ret = retConfig.(types.Config) r.Merge(rp) + + retp, tsp, rp = c.handleUserGrubCfg(options) + retConfig, ts = baseutil.MergeTranslatedConfigs(retp, tsp, ret, ts) + ret = retConfig.(types.Config) + r.Merge(rp) return ret, ts, r } @@ -323,7 +329,7 @@ func (c Config) processPackages(options common.TranslateOptions) (types.Config, return ret, ts, r } 
fullYamlContents := append([]byte("# Generated by Butane\n\n"), treeFileContents...) - src, gzipped, err := baseutil.MakeDataURL(fullYamlContents, nil, !options.NoResourceAutoCompression) + src, compression, err := baseutil.MakeDataURL(fullYamlContents, nil, !options.NoResourceAutoCompression) if err != nil { r.AddOnError(yamlPath, err) return ret, ts, r @@ -337,16 +343,74 @@ func (c Config) processPackages(options common.TranslateOptions) (types.Config, }, FileEmbedded1: types.FileEmbedded1{ Contents: types.Resource{ - Source: util.StrToPtr(src), + Source: &src, + Compression: compression, }, Mode: util.IntToPtr(0644), }, } - if gzipped { - file.Contents.Compression = util.StrToPtr("gzip") - } ret.Storage.Files = append(ret.Storage.Files, file) ts.AddFromCommonSource(yamlPath, path.New("json", "storage"), ret.Storage) return ret, ts, r } + +func (c Config) handleUserGrubCfg(options common.TranslateOptions) (types.Config, translate.TranslationSet, report.Report) { + rendered := types.Config{} + ts := translate.NewTranslationSet("yaml", "json") + var r report.Report + yamlPath := path.New("yaml", "grub", "users") + if len(c.Grub.Users) == 0 { + // No users + return rendered, ts, r + } + + // create boot filesystem + rendered.Storage.Filesystems = append(rendered.Storage.Filesystems, + types.Filesystem{ + Device: "/dev/disk/by-label/boot", + Format: util.StrToPtr("ext4"), + Path: util.StrToPtr("/boot"), + }) + + userCfgContent := []byte(buildGrubConfig(c.Grub)) + src, compression, err := baseutil.MakeDataURL(userCfgContent, nil, !options.NoResourceAutoCompression) + if err != nil { + r.AddOnError(yamlPath, err) + return rendered, ts, r + } + + // Create user.cfg file and add it to rendered config + rendered.Storage.Files = append(rendered.Storage.Files, + types.File{ + Node: types.Node{ + Path: "/boot/grub2/user.cfg", + }, + FileEmbedded1: types.FileEmbedded1{ + Append: []types.Resource{ + { + Source: util.StrToPtr(src), + Compression: compression, + }, + }, + }, + 
}) + + ts.AddFromCommonSource(yamlPath, path.New("json", "storage"), rendered.Storage) + return rendered, ts, r +} + +func buildGrubConfig(gb Grub) string { + // Process super users and corresponding passwords + allUsers := []string{} + cmds := []string{} + + for _, user := range gb.Users { + // We have already validated that user.Name and user.PasswordHash are non-empty + allUsers = append(allUsers, user.Name) + // Command for setting users password + cmds = append(cmds, fmt.Sprintf("password_pbkdf2 %s %s", user.Name, *user.PasswordHash)) + } + superUserCmd := fmt.Sprintf("set superusers=\"%s\"\n", strings.Join(allUsers, " ")) + return "# Generated by Butane\n\n" + superUserCmd + strings.Join(cmds, "\n") + "\n" +} diff --git a/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/validate.go b/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/validate.go index 97b2907a..61cf290d 100644 --- a/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/validate.go +++ b/mantle/vendor/github.com/coreos/butane/config/fcos/v1_5_exp/validate.go @@ -16,6 +16,7 @@ package v1_5_exp import ( "github.com/coreos/butane/config/common" + "github.com/coreos/ignition/v2/config/util" "github.com/coreos/vcontext/path" "github.com/coreos/vcontext/report" @@ -46,3 +47,14 @@ func (e Extension) Validate(c path.ContextPath) (r report.Report) { } return } + +func (user GrubUser) Validate(c path.ContextPath) (r report.Report) { + if user.Name == "" { + r.AddOnError(c.Append("name"), common.ErrGrubUserNameNotSpecified) + } + + if !util.NotEmpty(user.PasswordHash) { + r.AddOnError(c.Append("password_hash"), common.ErrGrubPasswordNotSpecified) + } + return +} diff --git a/mantle/storage/error.go b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_0/schema.go similarity index 70% rename from mantle/storage/error.go rename to mantle/vendor/github.com/coreos/butane/config/flatcar/v1_0/schema.go index 322ed7ac..6a1d7366 100644 --- a/mantle/storage/error.go +++ 
b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_0/schema.go @@ -1,4 +1,4 @@ -// Copyright 2016 CoreOS, Inc. +// Copyright 2020 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -10,16 +10,14 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. +// limitations under the License.) -package storage +package v1_0 -type Error struct { - Op string - URL string - Err error -} +import ( + base "github.com/coreos/butane/base/v0_4" +) -func (e *Error) Error() string { - return e.Op + " " + e.URL + ": " + e.Err.Error() +type Config struct { + base.Config `yaml:",inline"` } diff --git a/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_0/translate.go b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_0/translate.go new file mode 100644 index 00000000..498c329f --- /dev/null +++ b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_0/translate.go @@ -0,0 +1,60 @@ +// Copyright 2020 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.) 
+ +package v1_0 + +import ( + "github.com/coreos/butane/config/common" + cutil "github.com/coreos/butane/config/util" + "github.com/coreos/butane/translate" + + "github.com/coreos/ignition/v2/config/v3_3/types" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +// ToIgn3_3Unvalidated translates the config to an Ignition config. It also +// returns the set of translations it did so paths in the resultant config +// can be tracked back to their source in the source config. No config +// validation is performed on input or output. +func (c Config) ToIgn3_3Unvalidated(options common.TranslateOptions) (types.Config, translate.TranslationSet, report.Report) { + ret, ts, r := c.Config.ToIgn3_3Unvalidated(options) + if r.IsFatal() { + return types.Config{}, translate.TranslationSet{}, r + } + + for i, luks := range ret.Storage.Luks { + if luks.Clevis.IsPresent() { + r.AddOnError(path.New("json", "storage", "luks", i, "clevis"), common.ErrClevisSupport) + } + } + + return ret, ts, r +} + +// ToIgn3_3 translates the config to an Ignition config. It returns a +// report of any errors or warnings in the source and resultant config. If +// the report has fatal errors or it encounters other problems translating, +// an error is returned. +func (c Config) ToIgn3_3(options common.TranslateOptions) (types.Config, report.Report, error) { + cfg, r, err := cutil.Translate(c, "ToIgn3_3Unvalidated", options) + return cfg.(types.Config), r, err +} + +// ToIgn3_3Bytes translates from a v1.4 Butane config to a v3.3.0 Ignition config. It returns a report of any errors or +// warnings in the source and resultant config. If the report has fatal errors or it encounters other problems +// translating, an error is returned. 
+func ToIgn3_3Bytes(input []byte, options common.TranslateBytesOptions) ([]byte, report.Report, error) { + return cutil.TranslateBytes(input, &Config{}, "ToIgn3_3", options) +} diff --git a/mantle/system/arch.go b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/schema.go similarity index 70% rename from mantle/system/arch.go rename to mantle/vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/schema.go index f65b869c..72f1984f 100644 --- a/mantle/system/arch.go +++ b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/schema.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2020 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -10,15 +10,14 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. +// limitations under the License.) -package system +package v1_1_exp import ( - "github.com/coreos/stream-metadata-go/arch" + base "github.com/coreos/butane/base/v0_5_exp" ) -// RpmArch returns the architecture in RPM terms. -func RpmArch() string { - return arch.CurrentRpmArch() +type Config struct { + base.Config `yaml:",inline"` } diff --git a/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/translate.go b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/translate.go new file mode 100644 index 00000000..f1c48e9a --- /dev/null +++ b/mantle/vendor/github.com/coreos/butane/config/flatcar/v1_1_exp/translate.go @@ -0,0 +1,60 @@ +// Copyright 2020 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.) + +package v1_1_exp + +import ( + "github.com/coreos/butane/config/common" + cutil "github.com/coreos/butane/config/util" + "github.com/coreos/butane/translate" + + "github.com/coreos/ignition/v2/config/v3_4_experimental/types" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +// ToIgn3_4Unvalidated translates the config to an Ignition config. It also +// returns the set of translations it did so paths in the resultant config +// can be tracked back to their source in the source config. No config +// validation is performed on input or output. +func (c Config) ToIgn3_4Unvalidated(options common.TranslateOptions) (types.Config, translate.TranslationSet, report.Report) { + ret, ts, r := c.Config.ToIgn3_4Unvalidated(options) + if r.IsFatal() { + return types.Config{}, translate.TranslationSet{}, r + } + + for i, luks := range ret.Storage.Luks { + if luks.Clevis.IsPresent() { + r.AddOnError(path.New("json", "storage", "luks", i, "clevis"), common.ErrClevisSupport) + } + } + + return ret, ts, r +} + +// ToIgn3_4 translates the config to an Ignition config. It returns a +// report of any errors or warnings in the source and resultant config. If +// the report has fatal errors or it encounters other problems translating, +// an error is returned. 
+func (c Config) ToIgn3_4(options common.TranslateOptions) (types.Config, report.Report, error) { + cfg, r, err := cutil.Translate(c, "ToIgn3_4Unvalidated", options) + return cfg.(types.Config), r, err +} + +// ToIgn3_4Bytes translates from a v1.4 Butane config to a v3.3.0 Ignition config. It returns a report of any errors or +// warnings in the source and resultant config. If the report has fatal errors or it encounters other problems +// translating, an error is returned. +func ToIgn3_4Bytes(input []byte, options common.TranslateBytesOptions) ([]byte, report.Report, error) { + return cutil.TranslateBytes(input, &Config{}, "ToIgn3_4", options) +} diff --git a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_10/translate.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_10/translate.go index dc554212..722162cb 100644 --- a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_10/translate.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_10/translate.go @@ -200,6 +200,9 @@ func validateRHCOSSupport(mc result.MachineConfig, ts translate.TranslationSet) func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) report.Report { // Error classes for the purposes of this function: // + // UNPARSABLE - Cannot be rendered into a config by the MCC. If + // present in MC, MCC will mark the pool degraded. We reject these. + // // FORBIDDEN - Not supported by the MCD. If present in MC, MCD will // mark the node degraded. We reject these. 
// @@ -232,6 +235,10 @@ func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) re r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "contents", "source"), common.ErrFileSchemeSupport) } } + if file.Mode != nil && *file.Mode & ^0777 != 0 { + // UNPARSABLE + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "mode"), common.ErrFileSpecialModeSupport) + } } for i := range mc.Spec.Config.Storage.Links { // IMMUTABLE diff --git a/mantle/cmd/ore/azure/share-image.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/result/schema.go similarity index 35% rename from mantle/cmd/ore/azure/share-image.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/result/schema.go index 8d8a4038..37e49f30 100644 --- a/mantle/cmd/ore/azure/share-image.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/result/schema.go @@ -1,4 +1,4 @@ -// Copyright 2016 CoreOS, Inc. +// Copyright 2021 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -10,44 +10,39 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. +// limitations under the License.) 
-package azure +package result import ( - "fmt" - - "github.com/spf13/cobra" + "github.com/coreos/ignition/v2/config/v3_2/types" ) -var ( - cmdShareImage = &cobra.Command{ - Use: "share-image image-name", - Short: "Set permissions on an azure OS image", - RunE: runShareImage, - - SilenceUsage: true, - } - - sharePermission string +const ( + MC_API_VERSION = "machineconfiguration.openshift.io/v1" + MC_KIND = "MachineConfig" ) -func init() { - sv := cmdShareImage.Flags().StringVar +// We round-trip through JSON because Ignition uses `json` struct tags, +// so all struct tags need to be `json` even though we're ultimately +// writing YAML. - sv(&sharePermission, "permission", "public", "Image permission (one of: public, msdn, private)") - - Azure.AddCommand(cmdShareImage) +type MachineConfig struct { + ApiVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata Metadata `json:"metadata"` + Spec Spec `json:"spec"` } -func runShareImage(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return fmt.Errorf("expecting 1 argument, got %d", len(args)) - } - - if sharePermission == "" { - return fmt.Errorf("permission is required") - } +type Metadata struct { + Name string `json:"name"` + Labels map[string]string `json:"labels,omitempty"` +} - return api.ShareImage(args[0], sharePermission) +type Spec struct { + Config types.Config `json:"config"` + KernelArguments []string `json:"kernelArguments,omitempty"` + Extensions []string `json:"extensions,omitempty"` + FIPS *bool `json:"fips,omitempty"` + KernelType *string `json:"kernelType,omitempty"` } diff --git a/mantle/storage/index/set.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/schema.go similarity index 45% rename from mantle/storage/index/set.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/schema.go index 2645614a..eac0a311 100644 --- a/mantle/storage/index/set.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/schema.go 
@@ -1,4 +1,4 @@ -// Copyright 2016 CoreOS, Inc. +// Copyright 2020 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -10,37 +10,30 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. +// limitations under the License.) -package index +package v4_11 import ( - "strings" - - gs "google.golang.org/api/storage/v1" - - "github.com/coreos/mantle/storage" + fcos "github.com/coreos/butane/config/fcos/v1_3" ) -type IndexSet map[string]struct{} - -func NewIndexSet(bucket *storage.Bucket) IndexSet { - is := IndexSet(make(map[string]struct{})) - - for _, prefix := range bucket.Prefixes() { - is[prefix] = struct{}{} - is[strings.TrimSuffix(prefix, "/")] = struct{}{} - is[prefix+"index.html"] = struct{}{} - } +const ROLE_LABEL_KEY = "machineconfiguration.openshift.io/role" - return is +type Config struct { + fcos.Config `yaml:",inline"` + Metadata Metadata `yaml:"metadata"` + OpenShift OpenShift `yaml:"openshift"` } -func (is IndexSet) IsIndex(obj *gs.Object) bool { - _, isIndex := is[obj.Name] - return isIndex +type Metadata struct { + Name string `yaml:"name"` + Labels map[string]string `yaml:"labels,omitempty"` } -func (is IndexSet) NotIndex(obj *gs.Object) bool { - return !is.IsIndex(obj) +type OpenShift struct { + KernelArguments []string `yaml:"kernel_arguments"` + Extensions []string `yaml:"extensions"` + FIPS *bool `yaml:"fips"` + KernelType *string `yaml:"kernel_type"` } diff --git a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/translate.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/translate.go similarity index 89% rename from mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/translate.go rename to 
mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/translate.go index 261f11f2..b19ad933 100644 --- a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/translate.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/translate.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.) -package v4_11_exp +package v4_11 import ( "net/url" @@ -20,12 +20,12 @@ import ( "strings" "github.com/coreos/butane/config/common" - "github.com/coreos/butane/config/openshift/v4_11_exp/result" + "github.com/coreos/butane/config/openshift/v4_11/result" cutil "github.com/coreos/butane/config/util" "github.com/coreos/butane/translate" "github.com/coreos/ignition/v2/config/util" - "github.com/coreos/ignition/v2/config/v3_4_experimental/types" + "github.com/coreos/ignition/v2/config/v3_2/types" "github.com/coreos/vcontext/path" "github.com/coreos/vcontext/report" ) @@ -42,7 +42,7 @@ const ( // can be tracked back to their source in the source config. No config // validation is performed on input or output. func (c Config) ToMachineConfig4_11Unvalidated(options common.TranslateOptions) (result.MachineConfig, translate.TranslationSet, report.Report) { - cfg, ts, r := c.Config.ToIgn3_4Unvalidated(options) + cfg, ts, r := c.Config.ToIgn3_2Unvalidated(options) if r.IsFatal() { return result.MachineConfig{}, ts, r } @@ -102,11 +102,11 @@ func (c Config) ToMachineConfig4_11(options common.TranslateOptions) (result.Mac return cfg.(result.MachineConfig), r, err } -// ToIgn3_4Unvalidated translates the config to an Ignition config. It also +// ToIgn3_2Unvalidated translates the config to an Ignition config. It also // returns the set of translations it did so paths in the resultant config // can be tracked back to their source in the source config. No config // validation is performed on input or output. 
-func (c Config) ToIgn3_4Unvalidated(options common.TranslateOptions) (types.Config, translate.TranslationSet, report.Report) { +func (c Config) ToIgn3_2Unvalidated(options common.TranslateOptions) (types.Config, translate.TranslationSet, report.Report) { mc, ts, r := c.ToMachineConfig4_11Unvalidated(options) cfg := mc.Spec.Config @@ -121,12 +121,12 @@ func (c Config) ToIgn3_4Unvalidated(options common.TranslateOptions) (types.Conf return cfg, ts, r } -// ToIgn3_4 translates the config to an Ignition config. It returns a +// ToIgn3_2 translates the config to an Ignition config. It returns a // report of any errors or warnings in the source and resultant config. If // the report has fatal errors or it encounters other problems translating, // an error is returned. -func (c Config) ToIgn3_4(options common.TranslateOptions) (types.Config, report.Report, error) { - cfg, r, err := cutil.Translate(c, "ToIgn3_4Unvalidated", options) +func (c Config) ToIgn3_2(options common.TranslateOptions) (types.Config, report.Report, error) { + cfg, r, err := cutil.Translate(c, "ToIgn3_2Unvalidated", options) return cfg.(types.Config), r, err } @@ -135,7 +135,7 @@ func (c Config) ToIgn3_4(options common.TranslateOptions) (types.Config, report. // translating, an error is returned. func ToConfigBytes(input []byte, options common.TranslateBytesOptions) ([]byte, report.Report, error) { if options.Raw { - return cutil.TranslateBytes(input, &Config{}, "ToIgn3_4", options) + return cutil.TranslateBytes(input, &Config{}, "ToIgn3_2", options) } else { return cutil.TranslateBytesYAML(input, &Config{}, "ToMachineConfig4_11", options) } @@ -206,10 +206,6 @@ func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) re // FORBIDDEN - Not supported by the MCD. If present in MC, MCD will // mark the node degraded. We reject these. // - // REDUNDANT - Feature is also provided by a MachineConfig-specific - // field with different semantics. 
To reduce confusion, disable - // this implementation. - // // IMMUTABLE - Permitted in MC, passed through to Ignition, but not // supported by the MCD. MCD will mark the node degraded if the // field changes after the node is provisioned. We reject these @@ -222,12 +218,6 @@ func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) re // supported fields. We reject these. var r report.Report - for i, fs := range mc.Spec.Config.Storage.Filesystems { - if fs.Format != nil && *fs.Format == "none" { - // UNPARSABLE - r.AddOnError(path.New("json", "spec", "config", "storage", "filesystems", i, "format"), common.ErrFilesystemNoneSupport) - } - } for i := range mc.Spec.Config.Storage.Directories { // IMMUTABLE r.AddOnError(path.New("json", "spec", "config", "storage", "directories", i), common.ErrDirectorySupport) @@ -245,6 +235,10 @@ func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) re r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "contents", "source"), common.ErrFileSchemeSupport) } } + if file.Mode != nil && *file.Mode & ^0777 != 0 { + // UNPARSABLE + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "mode"), common.ErrFileSpecialModeSupport) + } } for i := range mc.Spec.Config.Storage.Links { // IMMUTABLE @@ -281,13 +275,5 @@ func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) re r.AddOnError(path.New("json", "spec", "config", "passwd", "users", i), common.ErrUserNameSupport) } } - for i := range mc.Spec.Config.KernelArguments.ShouldExist { - // UNPARSABLE, REDUNDANT - r.AddOnError(path.New("json", "spec", "config", "kernelArguments", "shouldExist", i), common.ErrKernelArgumentSupport) - } - for i := range mc.Spec.Config.KernelArguments.ShouldNotExist { - // UNPARSABLE, REDUNDANT - r.AddOnError(path.New("json", "spec", "config", "kernelArguments", "shouldNotExist", i), common.ErrKernelArgumentSupport) - } return cutil.TranslateReportPaths(r, ts) } 
diff --git a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/validate.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/validate.go similarity index 98% rename from mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/validate.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/validate.go index 1f551c9e..dd827c2c 100644 --- a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/validate.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11/validate.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.) -package v4_11_exp +package v4_11 import ( "github.com/coreos/butane/config/common" diff --git a/mantle/cmd/ore/azure/unreplicate-image.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/result/schema.go similarity index 35% rename from mantle/cmd/ore/azure/unreplicate-image.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/result/schema.go index ec6b0f87..37e49f30 100644 --- a/mantle/cmd/ore/azure/unreplicate-image.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/result/schema.go @@ -1,4 +1,4 @@ -// Copyright 2016 CoreOS, Inc. +// Copyright 2021 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -10,34 +10,39 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. +// limitations under the License.) 
-package azure +package result import ( - "fmt" - - "github.com/spf13/cobra" + "github.com/coreos/ignition/v2/config/v3_2/types" ) -var ( - cmdUnreplicateImage = &cobra.Command{ - Use: "unreplicate-image image", - Short: "Unreplicate an OS image in Azure", - RunE: runUnreplicateImage, - - SilenceUsage: true, - } +const ( + MC_API_VERSION = "machineconfiguration.openshift.io/v1" + MC_KIND = "MachineConfig" ) -func init() { - Azure.AddCommand(cmdUnreplicateImage) +// We round-trip through JSON because Ignition uses `json` struct tags, +// so all struct tags need to be `json` even though we're ultimately +// writing YAML. + +type MachineConfig struct { + ApiVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata Metadata `json:"metadata"` + Spec Spec `json:"spec"` } -func runUnreplicateImage(cmd *cobra.Command, args []string) error { - if len(args) != 1 { - return fmt.Errorf("expecting 1 argument") - } +type Metadata struct { + Name string `json:"name"` + Labels map[string]string `json:"labels,omitempty"` +} - return api.UnreplicateImage(args[0]) +type Spec struct { + Config types.Config `json:"config"` + KernelArguments []string `json:"kernelArguments,omitempty"` + Extensions []string `json:"extensions,omitempty"` + FIPS *bool `json:"fips,omitempty"` + KernelType *string `json:"kernelType,omitempty"` } diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/schema.go similarity index 45% rename from mantle/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/schema.go index e739996d..e143b547 100644 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/schema.go @@ -1,6 +1,4 @@ -// +build arm64,linux - -// Minio Cloud Storage, (C) 2016 Minio, Inc. 
+// Copyright 2020 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,38 +10,30 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. -// +// limitations under the License.) -package sha256 +package v4_12 import ( - "bytes" - "io/ioutil" + fcos "github.com/coreos/butane/config/fcos/v1_3" ) -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} +const ROLE_LABEL_KEY = "machineconfiguration.openshift.io/role" -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 +type Config struct { + fcos.Config `yaml:",inline"` + Metadata Metadata `yaml:"metadata"` + OpenShift OpenShift `yaml:"openshift"` } -func xgetbv(index uint32) (eax, edx uint32) { - return 0, 0 +type Metadata struct { + Name string `yaml:"name"` + Labels map[string]string `yaml:"labels,omitempty"` } -// File to check for cpu capabilities. -const procCPUInfo = "/proc/cpuinfo" - -// Feature to check for. 
-const sha256Feature = "sha2" - -func haveArmSha() bool { - cpuInfo, err := ioutil.ReadFile(procCPUInfo) - if err != nil { - return false - } - return bytes.Contains(cpuInfo, []byte(sha256Feature)) +type OpenShift struct { + KernelArguments []string `yaml:"kernel_arguments"` + Extensions []string `yaml:"extensions"` + FIPS *bool `yaml:"fips"` + KernelType *string `yaml:"kernel_type"` } diff --git a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/translate.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/translate.go new file mode 100644 index 00000000..4cb433fa --- /dev/null +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/translate.go @@ -0,0 +1,279 @@ +// Copyright 2020 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.) 
+ +package v4_12 + +import ( + "net/url" + "reflect" + "strings" + + "github.com/coreos/butane/config/common" + "github.com/coreos/butane/config/openshift/v4_12/result" + cutil "github.com/coreos/butane/config/util" + "github.com/coreos/butane/translate" + + "github.com/coreos/ignition/v2/config/util" + "github.com/coreos/ignition/v2/config/v3_2/types" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +const ( + // FIPS 140-2 doesn't allow the default XTS mode + fipsCipherOption = types.LuksOption("--cipher") + fipsCipherShortOption = types.LuksOption("-c") + fipsCipherArgument = types.LuksOption("aes-cbc-essiv:sha256") +) + +// ToMachineConfig4_12Unvalidated translates the config to a MachineConfig. It also +// returns the set of translations it did so paths in the resultant config +// can be tracked back to their source in the source config. No config +// validation is performed on input or output. +func (c Config) ToMachineConfig4_12Unvalidated(options common.TranslateOptions) (result.MachineConfig, translate.TranslationSet, report.Report) { + cfg, ts, r := c.Config.ToIgn3_2Unvalidated(options) + if r.IsFatal() { + return result.MachineConfig{}, ts, r + } + + // wrap + ts = ts.PrefixPaths(path.New("yaml"), path.New("json", "spec", "config")) + mc := result.MachineConfig{ + ApiVersion: result.MC_API_VERSION, + Kind: result.MC_KIND, + Metadata: result.Metadata{ + Name: c.Metadata.Name, + Labels: make(map[string]string), + }, + Spec: result.Spec{ + Config: cfg, + }, + } + ts.AddTranslation(path.New("yaml", "version"), path.New("json", "apiVersion")) + ts.AddTranslation(path.New("yaml", "version"), path.New("json", "kind")) + ts.AddTranslation(path.New("yaml", "metadata"), path.New("json", "metadata")) + ts.AddTranslation(path.New("yaml", "metadata", "name"), path.New("json", "metadata", "name")) + ts.AddTranslation(path.New("yaml", "metadata", "labels"), path.New("json", "metadata", "labels")) + ts.AddTranslation(path.New("yaml", 
"version"), path.New("json", "spec")) + ts.AddTranslation(path.New("yaml"), path.New("json", "spec", "config")) + for k, v := range c.Metadata.Labels { + mc.Metadata.Labels[k] = v + ts.AddTranslation(path.New("yaml", "metadata", "labels", k), path.New("json", "metadata", "labels", k)) + } + + // translate OpenShift fields + tr := translate.NewTranslator("yaml", "json", options) + from := &c.OpenShift + to := &mc.Spec + ts2, r2 := translate.Prefixed(tr, "extensions", &from.Extensions, &to.Extensions) + translate.MergeP(tr, ts2, &r2, "fips", &from.FIPS, &to.FIPS) + translate.MergeP2(tr, ts2, &r2, "kernel_arguments", &from.KernelArguments, "kernelArguments", &to.KernelArguments) + translate.MergeP2(tr, ts2, &r2, "kernel_type", &from.KernelType, "kernelType", &to.KernelType) + ts.MergeP2("openshift", "spec", ts2) + r.Merge(r2) + + // apply FIPS options to LUKS volumes + ts.Merge(addLuksFipsOptions(&mc)) + + // finally, check the fully desugared config for RHCOS and MCO support + r.Merge(validateRHCOSSupport(mc, ts)) + r.Merge(validateMCOSupport(mc, ts)) + + return mc, ts, r +} + +// ToMachineConfig4_12 translates the config to a MachineConfig. It returns a +// report of any errors or warnings in the source and resultant config. If +// the report has fatal errors or it encounters other problems translating, +// an error is returned. +func (c Config) ToMachineConfig4_12(options common.TranslateOptions) (result.MachineConfig, report.Report, error) { + cfg, r, err := cutil.Translate(c, "ToMachineConfig4_12Unvalidated", options) + return cfg.(result.MachineConfig), r, err +} + +// ToIgn3_2Unvalidated translates the config to an Ignition config. It also +// returns the set of translations it did so paths in the resultant config +// can be tracked back to their source in the source config. No config +// validation is performed on input or output. 
+func (c Config) ToIgn3_2Unvalidated(options common.TranslateOptions) (types.Config, translate.TranslationSet, report.Report) { + mc, ts, r := c.ToMachineConfig4_12Unvalidated(options) + cfg := mc.Spec.Config + + // report warnings if there are any non-empty fields in Spec (other + // than the Ignition config itself) that we're ignoring + mc.Spec.Config = types.Config{} + warnings := translate.PrefixReport(cutil.CheckForElidedFields(mc.Spec), "spec") + // translate from json space into yaml space + r.Merge(cutil.TranslateReportPaths(warnings, ts)) + + ts = ts.Descend(path.New("json", "spec", "config")) + return cfg, ts, r +} + +// ToIgn3_2 translates the config to an Ignition config. It returns a +// report of any errors or warnings in the source and resultant config. If +// the report has fatal errors or it encounters other problems translating, +// an error is returned. +func (c Config) ToIgn3_2(options common.TranslateOptions) (types.Config, report.Report, error) { + cfg, r, err := cutil.Translate(c, "ToIgn3_2Unvalidated", options) + return cfg.(types.Config), r, err +} + +// ToConfigBytes translates from a v4.12 Butane config to a v4.12 MachineConfig or a v3.2.0 Ignition config. It returns a report of any errors or +// warnings in the source and resultant config. If the report has fatal errors or it encounters other problems +// translating, an error is returned. 
+func ToConfigBytes(input []byte, options common.TranslateBytesOptions) ([]byte, report.Report, error) { + if options.Raw { + return cutil.TranslateBytes(input, &Config{}, "ToIgn3_2", options) + } else { + return cutil.TranslateBytesYAML(input, &Config{}, "ToMachineConfig4_12", options) + } +} + +func addLuksFipsOptions(mc *result.MachineConfig) translate.TranslationSet { + ts := translate.NewTranslationSet("yaml", "json") + if !util.IsTrue(mc.Spec.FIPS) { + return ts + } + +OUTER: + for i := range mc.Spec.Config.Storage.Luks { + luks := &mc.Spec.Config.Storage.Luks[i] + // Only add options if the user hasn't already specified + // a cipher option. Do this in-place, since config merging + // doesn't support conditional logic. + for _, option := range luks.Options { + if option == fipsCipherOption || + strings.HasPrefix(string(option), string(fipsCipherOption)+"=") || + option == fipsCipherShortOption { + continue OUTER + } + } + for j := 0; j < 2; j++ { + ts.AddTranslation(path.New("yaml", "openshift", "fips"), path.New("json", "spec", "config", "storage", "luks", i, "options", len(luks.Options)+j)) + } + if len(luks.Options) == 0 { + ts.AddTranslation(path.New("yaml", "openshift", "fips"), path.New("json", "spec", "config", "storage", "luks", i, "options")) + } + luks.Options = append(luks.Options, fipsCipherOption, fipsCipherArgument) + } + return ts +} + +// Error on fields that are rejected by RHCOS. +// +// Some of these fields may have been generated by sugar (e.g. +// boot_device.luks), so we work in JSON (output) space and then translate +// paths back to YAML (input) space. That's also the reason we do these +// checks after translation, rather than during validation. 
+func validateRHCOSSupport(mc result.MachineConfig, ts translate.TranslationSet) report.Report { + var r report.Report + for i, fs := range mc.Spec.Config.Storage.Filesystems { + if fs.Format != nil && *fs.Format == "btrfs" { + // we don't ship mkfs.btrfs + r.AddOnError(path.New("json", "spec", "config", "storage", "filesystems", i, "format"), common.ErrBtrfsSupport) + } + } + return cutil.TranslateReportPaths(r, ts) +} + +// Error on fields that are rejected outright by the MCO, or that are +// unsupported by the MCO and we want to discourage. +// +// https://github.com/openshift/machine-config-operator/blob/d6dabadeca05/MachineConfigDaemon.md#supported-vs-unsupported-ignition-config-changes +// +// Some of these fields may have been generated by sugar (e.g. storage.trees), +// so we work in JSON (output) space and then translate paths back to YAML +// (input) space. That's also the reason we do these checks after +// translation, rather than during validation. +func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) report.Report { + // Error classes for the purposes of this function: + // + // UNPARSABLE - Cannot be rendered into a config by the MCC. If + // present in MC, MCC will mark the pool degraded. We reject these. + // + // FORBIDDEN - Not supported by the MCD. If present in MC, MCD will + // mark the node degraded. We reject these. + // + // IMMUTABLE - Permitted in MC, passed through to Ignition, but not + // supported by the MCD. MCD will mark the node degraded if the + // field changes after the node is provisioned. We reject these + // outright to discourage their use. + // + // TRIPWIRE - A subset of fields in the containing struct are + // supported by the MCD. If the struct contents change after the node + // is provisioned, and the struct contains unsupported fields, MCD + // will mark the node degraded, even if the change only affects + // supported fields. We reject these. 
+ + var r report.Report + for i := range mc.Spec.Config.Storage.Directories { + // IMMUTABLE + r.AddOnError(path.New("json", "spec", "config", "storage", "directories", i), common.ErrDirectorySupport) + } + for i, file := range mc.Spec.Config.Storage.Files { + if len(file.Append) > 0 { + // FORBIDDEN + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "append"), common.ErrFileAppendSupport) + } + if file.Contents.Source != nil { + fileSource, err := url.Parse(*file.Contents.Source) + // parse errors will be caught by normal config validation + if err == nil && fileSource.Scheme != "data" { + // FORBIDDEN + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "contents", "source"), common.ErrFileSchemeSupport) + } + } + if file.Mode != nil && *file.Mode & ^0777 != 0 { + // UNPARSABLE + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "mode"), common.ErrFileSpecialModeSupport) + } + } + for i := range mc.Spec.Config.Storage.Links { + // IMMUTABLE + // If you change this to be less restrictive without adding + // link support in the MCO, consider what should happen if + // the user specifies a storage.tree that includes symlinks. 
+ r.AddOnError(path.New("json", "spec", "config", "storage", "links", i), common.ErrLinkSupport) + } + for i := range mc.Spec.Config.Passwd.Groups { + // IMMUTABLE + r.AddOnError(path.New("json", "spec", "config", "passwd", "groups", i), common.ErrGroupSupport) + } + for i, user := range mc.Spec.Config.Passwd.Users { + if user.Name == "core" { + // SSHAuthorizedKeys is managed; other fields are not + v := reflect.ValueOf(user) + t := v.Type() + for j := 0; j < v.NumField(); j++ { + fv := v.Field(j) + ft := t.Field(j) + switch ft.Name { + case "Name", "SSHAuthorizedKeys": + continue + default: + if fv.IsValid() && !fv.IsZero() { + tag := strings.Split(ft.Tag.Get("json"), ",")[0] + // TRIPWIRE + r.AddOnError(path.New("json", "spec", "config", "passwd", "users", i, tag), common.ErrUserFieldSupport) + } + } + } + } else { + // TRIPWIRE + r.AddOnError(path.New("json", "spec", "config", "passwd", "users", i), common.ErrUserNameSupport) + } + } + return cutil.TranslateReportPaths(r, ts) +} diff --git a/mantle/util/bunzip.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/validate.go similarity index 43% rename from mantle/util/bunzip.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/validate.go index d17c1aca..ff1403cb 100644 --- a/mantle/util/bunzip.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_12/validate.go @@ -1,4 +1,4 @@ -// Copyright 2016 CoreOS, Inc. +// Copyright 2021 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -10,41 +10,34 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. +// limitations under the License.) 
-package util +package v4_12 import ( - "compress/bzip2" - "io" - "os" -) + "github.com/coreos/butane/config/common" -// Bunzip2 does bunzip2 decompression from src to dst. -// -// It matches the signature of io.Copy. -func Bunzip2(dst io.Writer, src io.Reader) (written int64, err error) { - bzr := bzip2.NewReader(src) - return io.Copy(dst, bzr) -} + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) -// Bunzip2File does bunzip2 decompression from src file into dst file. -func Bunzip2File(dst, src string) error { - in, err := os.Open(src) - if err != nil { - return err +func (m Metadata) Validate(c path.ContextPath) (r report.Report) { + if m.Name == "" { + r.AddOnError(c.Append("name"), common.ErrNameRequired) } - - defer in.Close() - - out, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err + if m.Labels[ROLE_LABEL_KEY] == "" { + r.AddOnError(c.Append("labels", ROLE_LABEL_KEY), common.ErrRoleRequired) } + return +} - _, err = Bunzip2(out, in) - if err != nil { - os.Remove(dst) +func (os OpenShift) Validate(c path.ContextPath) (r report.Report) { + if os.KernelType != nil { + switch *os.KernelType { + case "", "default", "realtime": + default: + r.AddOnError(c.Append("kernel_type"), common.ErrInvalidKernelType) + } } - return err + return } diff --git a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/result/schema.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/result/schema.go similarity index 100% rename from mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/result/schema.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/result/schema.go diff --git a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/schema.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/schema.go similarity index 98% rename from mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/schema.go 
rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/schema.go index 60a551fa..ab204d39 100644 --- a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_11_exp/schema.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/schema.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.) -package v4_11_exp +package v4_13_exp import ( fcos "github.com/coreos/butane/config/fcos/v1_5_exp" diff --git a/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/translate.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/translate.go new file mode 100644 index 00000000..d4840fc1 --- /dev/null +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/translate.go @@ -0,0 +1,321 @@ +// Copyright 2020 Red Hat, Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.) 
+ +package v4_13_exp + +import ( + "net/url" + "reflect" + "strings" + + "github.com/coreos/butane/config/common" + "github.com/coreos/butane/config/openshift/v4_13_exp/result" + cutil "github.com/coreos/butane/config/util" + "github.com/coreos/butane/translate" + + "github.com/coreos/ignition/v2/config/util" + "github.com/coreos/ignition/v2/config/v3_4_experimental/types" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +const ( + // FIPS 140-2 doesn't allow the default XTS mode + fipsCipherOption = types.LuksOption("--cipher") + fipsCipherShortOption = types.LuksOption("-c") + fipsCipherArgument = types.LuksOption("aes-cbc-essiv:sha256") +) + +// ToMachineConfig4_13Unvalidated translates the config to a MachineConfig. It also +// returns the set of translations it did so paths in the resultant config +// can be tracked back to their source in the source config. No config +// validation is performed on input or output. +func (c Config) ToMachineConfig4_13Unvalidated(options common.TranslateOptions) (result.MachineConfig, translate.TranslationSet, report.Report) { + cfg, ts, r := c.Config.ToIgn3_4Unvalidated(options) + if r.IsFatal() { + return result.MachineConfig{}, ts, r + } + ts = translateUserGrubCfg(&cfg, &ts) + + // wrap + ts = ts.PrefixPaths(path.New("yaml"), path.New("json", "spec", "config")) + mc := result.MachineConfig{ + ApiVersion: result.MC_API_VERSION, + Kind: result.MC_KIND, + Metadata: result.Metadata{ + Name: c.Metadata.Name, + Labels: make(map[string]string), + }, + Spec: result.Spec{ + Config: cfg, + }, + } + ts.AddTranslation(path.New("yaml", "version"), path.New("json", "apiVersion")) + ts.AddTranslation(path.New("yaml", "version"), path.New("json", "kind")) + ts.AddTranslation(path.New("yaml", "metadata"), path.New("json", "metadata")) + ts.AddTranslation(path.New("yaml", "metadata", "name"), path.New("json", "metadata", "name")) + ts.AddTranslation(path.New("yaml", "metadata", "labels"), path.New("json", 
"metadata", "labels")) + ts.AddTranslation(path.New("yaml", "version"), path.New("json", "spec")) + ts.AddTranslation(path.New("yaml"), path.New("json", "spec", "config")) + for k, v := range c.Metadata.Labels { + mc.Metadata.Labels[k] = v + ts.AddTranslation(path.New("yaml", "metadata", "labels", k), path.New("json", "metadata", "labels", k)) + } + + // translate OpenShift fields + tr := translate.NewTranslator("yaml", "json", options) + from := &c.OpenShift + to := &mc.Spec + ts2, r2 := translate.Prefixed(tr, "extensions", &from.Extensions, &to.Extensions) + translate.MergeP(tr, ts2, &r2, "fips", &from.FIPS, &to.FIPS) + translate.MergeP2(tr, ts2, &r2, "kernel_arguments", &from.KernelArguments, "kernelArguments", &to.KernelArguments) + translate.MergeP2(tr, ts2, &r2, "kernel_type", &from.KernelType, "kernelType", &to.KernelType) + ts.MergeP2("openshift", "spec", ts2) + r.Merge(r2) + + // apply FIPS options to LUKS volumes + ts.Merge(addLuksFipsOptions(&mc)) + + // finally, check the fully desugared config for RHCOS and MCO support + r.Merge(validateRHCOSSupport(mc, ts)) + r.Merge(validateMCOSupport(mc, ts)) + + return mc, ts, r +} + +// ToMachineConfig4_13 translates the config to a MachineConfig. It returns a +// report of any errors or warnings in the source and resultant config. If +// the report has fatal errors or it encounters other problems translating, +// an error is returned. +func (c Config) ToMachineConfig4_13(options common.TranslateOptions) (result.MachineConfig, report.Report, error) { + cfg, r, err := cutil.Translate(c, "ToMachineConfig4_13Unvalidated", options) + return cfg.(result.MachineConfig), r, err +} + +// ToIgn3_4Unvalidated translates the config to an Ignition config. It also +// returns the set of translations it did so paths in the resultant config +// can be tracked back to their source in the source config. No config +// validation is performed on input or output. 
+func (c Config) ToIgn3_4Unvalidated(options common.TranslateOptions) (types.Config, translate.TranslationSet, report.Report) { + mc, ts, r := c.ToMachineConfig4_13Unvalidated(options) + cfg := mc.Spec.Config + + // report warnings if there are any non-empty fields in Spec (other + // than the Ignition config itself) that we're ignoring + mc.Spec.Config = types.Config{} + warnings := translate.PrefixReport(cutil.CheckForElidedFields(mc.Spec), "spec") + // translate from json space into yaml space + r.Merge(cutil.TranslateReportPaths(warnings, ts)) + + ts = ts.Descend(path.New("json", "spec", "config")) + return cfg, ts, r +} + +// ToIgn3_4 translates the config to an Ignition config. It returns a +// report of any errors or warnings in the source and resultant config. If +// the report has fatal errors or it encounters other problems translating, +// an error is returned. +func (c Config) ToIgn3_4(options common.TranslateOptions) (types.Config, report.Report, error) { + cfg, r, err := cutil.Translate(c, "ToIgn3_4Unvalidated", options) + return cfg.(types.Config), r, err +} + +// ToConfigBytes translates from a v4.13 Butane config to a v4.13 MachineConfig or a v3.4.0 Ignition config. It returns a report of any errors or +// warnings in the source and resultant config. If the report has fatal errors or it encounters other problems +// translating, an error is returned. 
+func ToConfigBytes(input []byte, options common.TranslateBytesOptions) ([]byte, report.Report, error) { + if options.Raw { + return cutil.TranslateBytes(input, &Config{}, "ToIgn3_4", options) + } else { + return cutil.TranslateBytesYAML(input, &Config{}, "ToMachineConfig4_13", options) + } +} + +func addLuksFipsOptions(mc *result.MachineConfig) translate.TranslationSet { + ts := translate.NewTranslationSet("yaml", "json") + if !util.IsTrue(mc.Spec.FIPS) { + return ts + } + +OUTER: + for i := range mc.Spec.Config.Storage.Luks { + luks := &mc.Spec.Config.Storage.Luks[i] + // Only add options if the user hasn't already specified + // a cipher option. Do this in-place, since config merging + // doesn't support conditional logic. + for _, option := range luks.Options { + if option == fipsCipherOption || + strings.HasPrefix(string(option), string(fipsCipherOption)+"=") || + option == fipsCipherShortOption { + continue OUTER + } + } + for j := 0; j < 2; j++ { + ts.AddTranslation(path.New("yaml", "openshift", "fips"), path.New("json", "spec", "config", "storage", "luks", i, "options", len(luks.Options)+j)) + } + if len(luks.Options) == 0 { + ts.AddTranslation(path.New("yaml", "openshift", "fips"), path.New("json", "spec", "config", "storage", "luks", i, "options")) + } + luks.Options = append(luks.Options, fipsCipherOption, fipsCipherArgument) + } + return ts +} + +// Error on fields that are rejected by RHCOS. +// +// Some of these fields may have been generated by sugar (e.g. +// boot_device.luks), so we work in JSON (output) space and then translate +// paths back to YAML (input) space. That's also the reason we do these +// checks after translation, rather than during validation. 
+func validateRHCOSSupport(mc result.MachineConfig, ts translate.TranslationSet) report.Report { + var r report.Report + for i, fs := range mc.Spec.Config.Storage.Filesystems { + if fs.Format != nil && *fs.Format == "btrfs" { + // we don't ship mkfs.btrfs + r.AddOnError(path.New("json", "spec", "config", "storage", "filesystems", i, "format"), common.ErrBtrfsSupport) + } + } + return cutil.TranslateReportPaths(r, ts) +} + +// Error on fields that are rejected outright by the MCO, or that are +// unsupported by the MCO and we want to discourage. +// +// https://github.com/openshift/machine-config-operator/blob/d6dabadeca05/MachineConfigDaemon.md#supported-vs-unsupported-ignition-config-changes +// +// Some of these fields may have been generated by sugar (e.g. storage.trees), +// so we work in JSON (output) space and then translate paths back to YAML +// (input) space. That's also the reason we do these checks after +// translation, rather than during validation. +func validateMCOSupport(mc result.MachineConfig, ts translate.TranslationSet) report.Report { + // Error classes for the purposes of this function: + // + // UNPARSABLE - Cannot be rendered into a config by the MCC. If + // present in MC, MCC will mark the pool degraded. We reject these. + // + // FORBIDDEN - Not supported by the MCD. If present in MC, MCD will + // mark the node degraded. We reject these. + // + // REDUNDANT - Feature is also provided by a MachineConfig-specific + // field with different semantics. To reduce confusion, disable + // this implementation. + // + // IMMUTABLE - Permitted in MC, passed through to Ignition, but not + // supported by the MCD. MCD will mark the node degraded if the + // field changes after the node is provisioned. We reject these + // outright to discourage their use. + // + // TRIPWIRE - A subset of fields in the containing struct are + // supported by the MCD. 
If the struct contents change after the node + // is provisioned, and the struct contains unsupported fields, MCD + // will mark the node degraded, even if the change only affects + // supported fields. We reject these. + + var r report.Report + for i, fs := range mc.Spec.Config.Storage.Filesystems { + if fs.Format != nil && *fs.Format == "none" { + // UNPARSABLE + r.AddOnError(path.New("json", "spec", "config", "storage", "filesystems", i, "format"), common.ErrFilesystemNoneSupport) + } + } + for i := range mc.Spec.Config.Storage.Directories { + // IMMUTABLE + r.AddOnError(path.New("json", "spec", "config", "storage", "directories", i), common.ErrDirectorySupport) + } + for i, file := range mc.Spec.Config.Storage.Files { + if len(file.Append) > 0 { + // FORBIDDEN + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "append"), common.ErrFileAppendSupport) + } + if file.Contents.Source != nil { + fileSource, err := url.Parse(*file.Contents.Source) + // parse errors will be caught by normal config validation + if err == nil && fileSource.Scheme != "data" { + // FORBIDDEN + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "contents", "source"), common.ErrFileSchemeSupport) + } + } + if file.Mode != nil && *file.Mode & ^0777 != 0 { + // UNPARSABLE + r.AddOnError(path.New("json", "spec", "config", "storage", "files", i, "mode"), common.ErrFileSpecialModeSupport) + } + } + for i := range mc.Spec.Config.Storage.Links { + // IMMUTABLE + // If you change this to be less restrictive without adding + // link support in the MCO, consider what should happen if + // the user specifies a storage.tree that includes symlinks. 
+ r.AddOnError(path.New("json", "spec", "config", "storage", "links", i), common.ErrLinkSupport) + } + for i := range mc.Spec.Config.Passwd.Groups { + // IMMUTABLE + r.AddOnError(path.New("json", "spec", "config", "passwd", "groups", i), common.ErrGroupSupport) + } + for i, user := range mc.Spec.Config.Passwd.Users { + if user.Name == "core" { + // SSHAuthorizedKeys is managed; other fields are not + v := reflect.ValueOf(user) + t := v.Type() + for j := 0; j < v.NumField(); j++ { + fv := v.Field(j) + ft := t.Field(j) + switch ft.Name { + case "Name", "SSHAuthorizedKeys": + continue + default: + if fv.IsValid() && !fv.IsZero() { + tag := strings.Split(ft.Tag.Get("json"), ",")[0] + // TRIPWIRE + r.AddOnError(path.New("json", "spec", "config", "passwd", "users", i, tag), common.ErrUserFieldSupport) + } + } + } + } else { + // TRIPWIRE + r.AddOnError(path.New("json", "spec", "config", "passwd", "users", i), common.ErrUserNameSupport) + } + } + for i := range mc.Spec.Config.KernelArguments.ShouldExist { + // UNPARSABLE, REDUNDANT + r.AddOnError(path.New("json", "spec", "config", "kernelArguments", "shouldExist", i), common.ErrKernelArgumentSupport) + } + for i := range mc.Spec.Config.KernelArguments.ShouldNotExist { + // UNPARSABLE, REDUNDANT + r.AddOnError(path.New("json", "spec", "config", "kernelArguments", "shouldNotExist", i), common.ErrKernelArgumentSupport) + } + return cutil.TranslateReportPaths(r, ts) +} + +// fcos config generates a user.cfg file using append; however, OpenShift config +// does not support append (since MCO does not support it). 
Let change the file to use contents +func translateUserGrubCfg(config *types.Config, ts *translate.TranslationSet) translate.TranslationSet { + newMappings := translate.NewTranslationSet("json", "json") + for i, file := range config.Storage.Files { + if file.Path == "/boot/grub2/user.cfg" { + if len(file.Append) != 1 { + // The number of append objects was different from expected, this file + // was created by the user and not via butane GRUB sugar + return *ts + } + fromPath := path.New("json", "storage", "files", i, "append", 0) + translatedPath := path.New("json", "storage", "files", i, "contents") + config.Storage.Files[i].FileEmbedded1.Contents = file.Append[0] + config.Storage.Files[i].FileEmbedded1.Append = nil + newMappings.AddFromCommonObject(fromPath, translatedPath, config.Storage.Files[i].FileEmbedded1.Contents) + + return ts.Map(newMappings) + } + } + return *ts +} diff --git a/mantle/system/mount_linux_test.go b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/validate.go similarity index 43% rename from mantle/system/mount_linux_test.go rename to mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/validate.go index 7847eb53..74f0955d 100644 --- a/mantle/system/mount_linux_test.go +++ b/mantle/vendor/github.com/coreos/butane/config/openshift/v4_13_exp/validate.go @@ -1,4 +1,4 @@ -// Copyright 2015 CoreOS, Inc. +// Copyright 2021 Red Hat, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -10,34 +10,34 @@ // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and -// limitations under the License. +// limitations under the License.) 
-package system +package v4_13_exp import ( - "syscall" - "testing" + "github.com/coreos/butane/config/common" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" ) -func TestSplitFlags(t *testing.T) { - data := []struct { - opts string - flags uintptr - extra string - }{ - {"", 0, ""}, - {"nodev,nosuid,mode=755", syscall.MS_NOSUID | syscall.MS_NODEV, "mode=755"}, - {"mode=755,other", 0, "mode=755,other"}, - {"mode=755,nodev,other", syscall.MS_NODEV, "mode=755,other"}, +func (m Metadata) Validate(c path.ContextPath) (r report.Report) { + if m.Name == "" { + r.AddOnError(c.Append("name"), common.ErrNameRequired) + } + if m.Labels[ROLE_LABEL_KEY] == "" { + r.AddOnError(c.Append("labels", ROLE_LABEL_KEY), common.ErrRoleRequired) } + return +} - for _, d := range data { - f, e := splitFlags(d.opts) - if f != d.flags { - t.Errorf("bad flags for %q, got 0x%x wanted 0x%x", d.opts, f, d.flags) - } - if e != d.extra { - t.Errorf("bad extra for %q, got %q wanted %q", d.opts, e, d.extra) +func (os OpenShift) Validate(c path.ContextPath) (r report.Report) { + if os.KernelType != nil { + switch *os.KernelType { + case "", "default", "realtime": + default: + r.AddOnError(c.Append("kernel_type"), common.ErrInvalidKernelType) } } + return } diff --git a/mantle/vendor/github.com/coreos/butane/translate/set.go b/mantle/vendor/github.com/coreos/butane/translate/set.go index e6c582e4..60df644e 100644 --- a/mantle/vendor/github.com/coreos/butane/translate/set.go +++ b/mantle/vendor/github.com/coreos/butane/translate/set.go @@ -105,6 +105,19 @@ func (ts TranslationSet) AddFromCommonSource(common path.ContextPath, toPrefix p ts.AddTranslation(common, toPrefix) } +// AddFromCommonObject adds translations for all of the paths in to. The paths being translated +// are prefixed by fromPrefix and the translated paths are prefixed by toPrefix. +// This is useful when we want to copy all the fields of an object to another with the same field names. 
+func (ts TranslationSet) AddFromCommonObject(fromPrefix path.ContextPath, toPrefix path.ContextPath, to interface{}) { + vTo := reflect.ValueOf(to) + vPaths := getAllPaths(vTo, ts.ToTag, true) + + for _, path := range vPaths { + ts.AddTranslation(fromPrefix.Append(path.Path...), toPrefix.Append(path.Path...)) + } + ts.AddTranslation(fromPrefix, toPrefix) +} + // Merge adds all the entries to the set. It mutates the Set in place. func (ts TranslationSet) Merge(from TranslationSet) { for _, t := range from.Set { @@ -160,6 +173,24 @@ OUTER: return ret } +// Map returns a new TranslationSet with To translation paths further +// translated through mappings. Translations not listed in mappings are +// copied unmodified. +func (ts TranslationSet) Map(mappings TranslationSet) TranslationSet { + if mappings.FromTag != ts.ToTag || mappings.ToTag != ts.ToTag { + panic(fmt.Sprintf("mappings have incorrect tag; %q != %q || %q != %q", mappings.FromTag, ts.ToTag, mappings.ToTag, ts.ToTag)) + } + ret := NewTranslationSet(ts.FromTag, ts.ToTag) + ret.Merge(ts) + for _, mapping := range mappings.Set { + if t, ok := ret.Set[mapping.From.String()]; ok { + delete(ret.Set, mapping.From.String()) + ret.AddTranslation(t.From, mapping.To) + } + } + return ret +} + // DebugVerifyCoverage recursively checks whether every non-zero field in v // has a translation. If translations are missing, it returns a multi-line // error listing them. 
diff --git a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/cosa_reader.go b/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/cosa_reader.go deleted file mode 100644 index e193360a..00000000 --- a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/cosa_reader.go +++ /dev/null @@ -1,252 +0,0 @@ -package cosa - -import ( - "context" - "io" - "os" - "path/filepath" - "strings" - "time" - - "github.com/minio/minio-go/v7" - "github.com/pkg/errors" - log "github.com/sirupsen/logrus" -) - -/* - cosa_reader.go provides an interface for interacting with - files through an "ioBackender." Any struct that inmplements ioBackender - can read meta-data. - -*/ - -// default ioBackend is file backend -var ioBackend ioBackender = new(ioBackendFile) - -// ioBackendMinio is an ioBackender. -var _ ioBackender = &ioBackendMinio{} - -// newBackend returns a new backend -func newBackend() ioBackender { - var newBackender ioBackender = ioBackend - return newBackender -} - -// Open calls the backend's open function. -func Open(p string) (io.ReadCloser, error) { - nb := newBackend() - return nb.Open(p) -} - -// ioBackender is the basic interface. -type ioBackender interface { - Open(string) (io.ReadCloser, error) -} - -// ioBackendFile is a file based backend -type ioBackendFile struct { - *os.File - path string -} - -// Open implements ioBackender Open interface. -func (i *ioBackendFile) Open(p string) (io.ReadCloser, error) { - f, err := os.Open(p) - i.File = f - i.path = p - return f, err -} - -func (i *ioBackendFile) Name() string { - return i.path -} - -// ioBackendMinio is a minio based backend -type ioBackendMinio struct { - ctx context.Context - m *minio.Client - obj *minio.Object - name string - - bucket string - prefix string -} - -var ErrNoMinioClient = errors.New("minio client is not defined") - -// getBucketAndPath returns the relative bucket and path. 
-func (im *ioBackendMinio) getBucketAndPath(p string) (string, string) { - parts := strings.Split(p, "/") - path := strings.Join(parts[1:], "/") - - bucket := parts[0] - if im.bucket != "" { - bucket = im.bucket - path = p - } - if im.prefix != "" { - path = filepath.Join(im.prefix, path) - } - return bucket, path -} - -// Open implements ioBackender's and os.File's Open interface. -func (im *ioBackendMinio) Open(p string) (io.ReadCloser, error) { - if im.m == nil { - return nil, ErrNoMinioClient - } - - bucket, path := im.getBucketAndPath(p) - obj, err := im.m.GetObject(im.ctx, bucket, path, minio.GetObjectOptions{}) - if err != nil { - return nil, err - } - im.obj = obj - im.name = p - - return obj, nil -} - -// objectInfo holds basic information about either a file object -// or a remote minio object. -type objectInfo struct { - info minio.ObjectInfo - name string -} - -// TODO: drop with GoLang 1.16. This is a backport of the interface from 1.16. -// var _ os.FileInfo = &objectInfo{} -type fileMode uint32 -type fileInfo interface { - Name() string // base name of the file - Size() int64 // length in bytes for regular files; system-dependent for others - Mode() fileMode // file mode bits - ModTime() time.Time // modification time - IsDir() bool // abbreviation for Mode().IsDir() - Sys() interface{} // underlying data source (can return nil) -} - -// objectInfo implements the os.FileInfo interface. -// This allows for abstracting any file or object to be compared as if they were -// local files regardless of location. -var _ fileInfo = &objectInfo{} - -// IsDir implements the os.FileInfo IsDir func. For minio objects, -// the answer is always false. -func (ao *objectInfo) IsDir() bool { - return false -} - -// ModTime implements the os.FileInfo ModTime func. The returned value -// is remote aodification time. -func (ao *objectInfo) ModTime() time.Time { - return ao.info.LastModified -} - -// Mode implements the os.FileInfo Mode func. 
Since there is not simple -// way to convert an ACL into Unix permisions, it blindly returns 0644. -func (ao *objectInfo) Mode() fileMode { - return 0644 -} - -// Name implements the os.FileInfo interface Name func. -func (ao *objectInfo) Name() string { - return filepath.Base(ao.name) -} - -// Size implements the os.FileInfo size func. -func (ao *objectInfo) Size() int64 { - return ao.info.Size -} - -// Sys implements the os.FileInfo interface Sys func. The interface spec allows -// for returning a nil. -func (ao *objectInfo) Sys() interface{} { - return nil -} - -// SetIOBackendMinio sets the backend to minio. The client must be provided -// by the caller, including authorization. -func SetIOBackendMinio(ctx context.Context, m *minio.Client, bucket, prefix string) error { - if m == nil { - return errors.New("minio client must not be nil") - } - - log.WithFields(log.Fields{ - "bucket": bucket, - "prefix": prefix, - }).Info("minio bucket and prefix defined") - - backend := &ioBackendMinio{ - m: m, - ctx: ctx, - bucket: bucket, - prefix: prefix, - } - ioBackend = backend - walkFn = createMinioWalkFunc(m, bucket, prefix) - return nil -} - -// SetIOBackendFile sets the backend to the default file backend. 
-func SetIOBackendFile() { - ioBackend = new(ioBackendFile) -} - -// walkerFn is a function that implements the walk func -type walkerFn func(string) <-chan fileInfo - -// walkFn is used to walk paths -var walkFn walkerFn = defaultWalkFunc - -// defaultWalkFunc walks over a directory and returns a channel of os.FileInfo -func defaultWalkFunc(p string) <-chan fileInfo { - ret := make(chan fileInfo) - go func() { - defer close(ret) //nolint - _ = filepath.Walk(p, func(path string, info os.FileInfo, err error) error { - if err != nil { - return nil - } - ret <- &objectInfo{ - name: filepath.Join(p, info.Name()), - info: minio.ObjectInfo{ - Key: info.Name(), - Size: info.Size(), - LastModified: info.ModTime(), - }, - } - return nil - }) - }() - return ret -} - -// createMinioWalkFunc creates a new func a minio client. The returned function -// will list the remote objects and return os.FileInfo compliant interfaces. -func createMinioWalkFunc(m *minio.Client, bucket, prefix string) walkerFn { - return func(p string) <-chan fileInfo { - ret := make(chan fileInfo) - go func() { - defer close(ret) //nolint - ao := minio.ListObjectsOptions{ - Recursive: true, - } - if prefix != "" { - ao.Prefix = prefix - } - info := m.ListObjects(context.Background(), bucket, ao) - for { - val, ok := <-info - if !ok { - return - } - ret <- &objectInfo{ - info: val, - name: filepath.Join(bucket, val.Key), - } - } - }() - return ret - } -} diff --git a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/schema_doc.go b/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/schema_doc.go deleted file mode 100644 index e1ddd61c..00000000 --- a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/schema_doc.go +++ /dev/null @@ -1,869 +0,0 @@ -// Generated by ./generate-schema.sh -// DO NOT EDIT - -package cosa - -var generatedSchemaJSON = `{ - "definitions": { - "artifact": { - "type": "object", - "properties": { - "path": { - "$id": "#/artifact/Path", - "type":"string", 
- "title":"Path" - }, - "sha256": { - "$id": "#/artifact/sha256", - "type":"string", - "title":"SHA256" - }, - "size": { - "$id": "#/artifact/size", - "type":"number", - "title":"Size in bytes" - }, - "skip-compression": { - "$id": "#/artifact/skip-compression", - "type":"boolean", - "title":"Skip compression", - "description":"Artifact should not be compressed or decompressed before use", - "default":false - }, - "uncompressed-sha256": { - "$id": "#/artifact/uncompressed-sha256", - "type":"string", - "title":"Uncompressed SHA256" - }, - "uncompressed-size": { - "$id": "#/artifact/uncompressed-size", - "type":"integer", - "title":"Uncompressed-size" - } - }, - "optional": [ - "size", - "uncompressed-sha256", - "uncompressed-size", - "skip-compression" - ], - "required": [ - "path", - "sha256" - ] - }, - "image": { - "type": "object", - "required": [ - "digest", - "image" - ], - "optional": [ - "comment" - ], - "properties": { - "digest": { - "$id": "#/image/digest", - "type":"string", - "title":"Digest" - }, - "comment": { - "$id": "#/image/comment", - "type":"string", - "title":"Comment" - }, - "image": { - "$id": "#/image/image", - "type":"string", - "title":"Image" - } - } - }, - "cloudartifact": { - "type": "object", - "required": [ - "url" - ], - "optional": [ - "image", - "object", - "bucket", - "region" - ], - "properties": { - "image": { - "$id":"#/cloudartifact/image", - "type":"string", - "title":"Image" - }, - "url": { - "$id":"#/cloudartifact/url", - "type":"string", - "title":"URL" - }, - "bucket": { - "$id":"#/cloudartifact/bucket", - "type":"string", - "title":"Bucket" - }, - "region": { - "$id":"#/cloudartifact/region", - "type":"string", - "title":"Region" - }, - "object": { - "$id":"#/cloudartifact/object", - "type":"string", - "title":"Object" - } - } - }, - "git": { - "type": "object", - "required": [ - "commit", - "origin" - ], - "optional": [ - "branch", - "dirty" - ], - "properties": { - "branch": { - "$id":"#/git/branch", - "type":"string", 
- "title":"branch", - "default":"", - "examples": [ - "HEAD" - ], - "minLength": 3 - }, - "commit": { - "$id":"#/git/commit", - "type":"string", - "title":"commit", - "default":"", - "examples": [ - "742edc307e58f35824d906958b6493510e12b593" - ], - "minLength": 5 - }, - "dirty": { - "$id":"#/git/dirty", - "type":"string", - "title":"dirty", - "default":"", - "examples": [ - "true" - ], - "minLength": 1 - }, - "origin": { - "$id":"#/git/origin", - "type":"string", - "title":"origin", - "default":"", - "examples": [ - "https://github.com/coreos/fedora-coreos-config" - ], - "minLength": 1 - } - } - }, - "pkg-items": { - "type":"array", - "title":"Package Set differences", - "items": { - "$id":"#/pkgdiff/items/item", - "title":"Items", - "default":"", - "minLength": 1 - } - }, - "advisory-items": { - "type":"array", - "title":"Advisory diff", - "items": { - "$id":"#/advisory-diff/items/item", - "title":"Items", - "default":"" - } - } - }, - "$schema":"http://json-schema.org/draft-07/schema#", - "$id":"http://github.com/coreos/coreos-assembler/blob/main/v1.json.json", - "type":"object", - "title":"CoreOS Assember v1 meta.json schema", - "required": [ - "buildid", - "name", - "ostree-commit", - "ostree-content-checksum", - "ostree-timestamp", - "ostree-version", - "rpm-ostree-inputhash", - "summary" - ], - "optional": [ - "aliyun", - "amis", - "azure", - "azurestack", - "build-url", - "digitalocean", - "exoscale", - "gcp", - "ibmcloud", - "powervs", - "images", - "koji", - "oscontainer", - "extensions", - "parent-pkgdiff", - "pkgdiff", - "parent-advisories-diff", - "advisories-diff", - "release-payload", - - "coreos-assembler.basearch", - "coreos-assembler.build-timestamp", - "coreos-assembler.code-source", - "coreos-assembler.config-dirty", - "coreos-assembler.config-gitrev", - "coreos-assembler.container-config-git", - "coreos-assembler.container-image-git", - "coreos-assembler.delayed-meta-merge", - "coreos-assembler.image-config-checksum", - 
"coreos-assembler.image-genver", - "coreos-assembler.image-input-checksum", - "coreos-assembler.meta-stamp", - "coreos-assembler.overrides-active", - "fedora-coreos.parent-commit", - "fedora-coreos.parent-version", - "ref" - ], - "additionalProperties":false, - "properties": { - "ref": { - "$id":"#/properties/ref", - "type":"string", - "title":"BuildRef", - "default":"", - "minLength": 1 - }, - "build-url": { - "$id":"#/properties/build-url", - "type":"string", - "title":"Build URL", - "default":"", - "minLength": 1 - }, - "buildid": { - "$id":"#/properties/buildid", - "type":"string", - "title":"BuildID", - "default":"", - "minLength": 1 - }, - "koji": { - "type": "object", - "properties": { - "build_id": { - "$id":"#/properties/kojiid", - "type":"number", - "title":"Koji Build ID" - }, - "token": { - "$id":"#/properties/kojitoken", - "type":"string", - "title":"Koji Token" - }, - "release": { - "$id":"#/properties/buildrelease", - "type":"string", - "title":"Build Release" - } - } - }, - "coreos-assembler.basearch": { - "$id":"#/properties/coreos-assembler.basearch", - "type":"string", - "title":"Architecture", - "default":"", - "minLength": 1 - }, - "coreos-assembler.build-timestamp": { - "$id":"#/properties/coreos-assembler.build-timestamp", - "type":"string", - "title":"Build Time Stamp", - "default":"", - "minLength": 1 - }, - "coreos-assembler.code-source": { - "$id":"#/properties/coreos-assembler.code-source", - "type":"string", - "title":"CoreOS Source", - "default":"", - "minLength": 1 - }, - "coreos-assembler.config-dirty": { - "$id":"#/properties/coreos-assembler.config-dirty", - "type":"string", - "title":"GitDirty", - "default":"", - "minLength": 1 - }, - "coreos-assembler.config-gitrev": { - "$id":"#/properties/coreos-assembler.config-gitrev", - "type":"string", - "title":"Config GitRev", - "default":"", - "minLength": 1 - }, - "coreos-assembler.container-config-git": { - "$id":"#/properties/coreos-assembler.container-config-git", - "type":"object", 
- "title":"Container Config GIT", - "$ref": "#/definitions/git" - }, - "coreos-assembler.container-image-git": { - "$id":"#/properties/coreos-assembler.container-image-git", - "type":"object", - "title":"COSA Container Image Git", - "$ref": "#/definitions/git" - }, - "coreos-assembler.delayed-meta-merge": { - "$id":"#/properties/coreos-assembler.delayed-meta-merge", - "type":"boolean", - "title":"COSA Delayed Meta Merge", - "default": "False" - }, - "coreos-assembler.meta-stamp": { - "$id":"#/properties/coreos-assembler.meta-stamp", - "type":"number", - "title":"Meta Stamp", - "default":"", - "minLength": 16 - }, - "fedora-coreos.parent-version": { - "$id":"#/properties/fedora-coreos.parent-version", - "type":"string", - "title":"Fedora CoreOS Parent Version", - "default":"", - "minLength": 12 - }, - "fedora-coreos.parent-commit": { - "$id":"#/properties/fedora-coreos.parent-commit", - "type":"string", - "title":"Fedora CoreOS parent commit", - "default":"", - "examples": [ - "f15f5b25cf138a7683e3d200c53ece2091bf71d31332135da87892ab72ff4ee3" - ], - "minLength": 64 - }, - "coreos-assembler.image-config-checksum": { - "$id":"#/properties/coreos-assembler.image-config-checksum", - "type":"string", - "title":"COSA image checksum", - "default":"", - "minLength": 64 - }, - "coreos-assembler.image-genver": { - "$id":"#/properties/coreos-assembler.image-genver", - "type":"integer", - "title":"COSA Image Version", - "default": 0, - "examples": [ - 0 - ] - }, - "coreos-assembler.image-input-checksum": { - "$id":"#/properties/coreos-assembler.image-input-checksum", - "type":"string", - "title":"Image input checksum", - "default":"", - "minLength": 64 - }, - "coreos-assembler.overrides-active": { - "$id":"#/properties/coreos-assembler.overrides-active", - "title":"Overrides Active", - "default":"", - "type": "boolean" - }, - "images": { - "$id":"#/properties/images", - "type":"object", - "title":"Build Artifacts", - "required": [ - "ostree" - ], - "optional": [ - "aliyun", - 
"aws", - "azure", - "azurestack", - "dasd", - "digitalocean", - "exoscale", - "gcp", - "ibmcloud", - "powervs", - "initramfs", - "iso", - "kernel", - "live-kernel", - "live-initramfs", - "live-iso", - "live-rootfs", - "metal", - "metal4k", - "nutanix", - "openstack", - "qemu", - "vmware", - "vultr" - ], - "properties": { - "ostree": { - "$id":"#/properties/images/properties/ostree", - "type":"object", - "title":"OSTree", - "$ref": "#/definitions/artifact" - }, - "dasd": { - "$id":"#/properties/images/properties/dasd", - "type":"object", - "title":"dasd", - "$ref": "#/definitions/artifact" - }, - "exoscale": { - "$id":"#/properties/images/properties/exoscale", - "type":"object", - "title":"exoscale", - "$ref": "#/definitions/artifact" - }, - "qemu": { - "$id":"#/properties/images/properties/qemu", - "type":"object", - "title":"Qemu", - "$ref": "#/definitions/artifact" - }, - "metal": { - "$id":"#/properties/images/properties/metal", - "type":"object", - "title":"Metal", - "$ref": "#/definitions/artifact" - }, - "metal4k": { - "$id":"#/properties/images/properties/metal4k", - "type":"object", - "title":"Metal (4K native)", - "$ref": "#/definitions/artifact" - }, - "iso": { - "$id":"#/properties/images/properties/iso", - "type":"object", - "title":"ISO", - "$ref": "#/definitions/artifact" - }, - "kernel": { - "$id":"#/properties/images/properties/kernel", - "type":"object", - "title":"Kernel", - "$ref": "#/definitions/artifact" - }, - "initramfs": { - "$id":"#/properties/images/properties/initramfs", - "type":"object", - "title":"Initramfs", - "$ref": "#/definitions/artifact" - }, - "live-kernel": { - "$id":"#/properties/images/properties/live-kernel", - "type":"object", - "title":"Live Kernel", - "$ref": "#/definitions/artifact" - }, - "live-initramfs": { - "$id":"#/properties/images/properties/live-initramfs", - "type":"object", - "title":"Live Initramfs", - "$ref": "#/definitions/artifact" - }, - "live-iso": { - "$id":"#/properties/images/properties/live-iso", - 
"type":"object", - "title":"Live ISO", - "$ref": "#/definitions/artifact" - }, - "live-rootfs": { - "$id":"#/properties/images/properties/live-rootfs", - "type":"object", - "title":"Live Rootfs", - "$ref": "#/definitions/artifact" - }, - "nutanix": { - "$id":"#/properties/images/properties/nutanix", - "type":"object", - "title":"Nutanix", - "$ref": "#/definitions/artifact" - }, - "openstack": { - "$id":"#/properties/images/properties/openstack", - "type":"object", - "title":"OpenStack", - "$ref": "#/definitions/artifact" - }, - "vmware": { - "$id":"#/properties/images/properties/vmware", - "type":"object", - "title":"VMWare", - "$ref": "#/definitions/artifact" - }, - "vultr": { - "$id": "#/properties/images/properties/vultr", - "type": "object", - "title": "Vultr", - "$ref": "#/definitions/artifact" - }, - "aliyun": { - "$id":"#/properties/images/properties/aliyun", - "type":"object", - "title":"Aliyun", - "$ref": "#/definitions/artifact" - }, - "aws": { - "$id":"#/properties/images/properties/aws", - "type":"object", - "title":"AWS", - "$ref": "#/definitions/artifact" - }, - "azure": { - "$id":"#/properties/images/properties/azure", - "type":"object", - "title":"Azure", - "$ref": "#/definitions/artifact" - }, - "azurestack": { - "$id":"#/properties/images/properties/azurestack", - "type":"object", - "title":"AzureStack", - "$ref": "#/definitions/artifact" - }, - "digitalocean": { - "$id":"#/properties/images/properties/digitalocean", - "type":"object", - "title":"DigitalOcean", - "$ref": "#/definitions/artifact" - }, - "ibmcloud": { - "$id":"#/properties/images/properties/ibmcloud", - "type":"object", - "title":"IBM Cloud", - "$ref": "#/definitions/artifact" - }, - "powervs": { - "$id":"#/properties/images/properties/powervs", - "type":"object", - "title":"Power Virtual Server", - "$ref": "#/definitions/artifact" - }, - "gcp": { - "$id":"#/properties/images/properties/gcp", - "type":"object", - "title":"GCP", - "$ref": "#/definitions/artifact" - } - } - }, - 
"name": { - "$id":"#/properties/name", - "type":"string", - "title":"Name", - "default":"fedora-coreos", - "examples": [ - "rhcos", - "fedora-coreos" - ] - }, - "oscontainer": { - "$id":"#/properties/oscontainer", - "type":"object", - "title":"Oscontainer", - "$ref": "#/definitions/image" - }, - "extensions": { - "$id":"#/properties/extensions", - "type":"object", - "title":"Extensions", - "required": [ - "path", - "sha256", - "rpm-ostree-state", - "manifest" - ], - "properties": { - "path": { - "$id": "#/artifact/Path", - "type":"string", - "title":"Path" - }, - "sha256": { - "$id": "#/artifact/sha256", - "type":"string", - "title":"SHA256" - }, - "rpm-ostree-state": { - "$id":"#/properties/extensions/items/properties/rpm-ostree-state", - "type":"string", - "title":"RpmOstreeState", - "default":"", - "minLength": 64 - }, - "manifest": { - "$id":"#/properties/extensions/items/properties/manifest", - "type":"object", - "title":"Manifest" - } - } - }, - "ostree-commit": { - "$id":"#/properties/ostree-commit", - "type":"string", - "title":"ostree-commit", - "default":"", - "minLength": 64 - }, - "ostree-content-bytes-written": { - "$id":"#/properties/ostree-content-bytes-written", - "type":"integer", - "title":"ostree-content-bytes-written", - "default": 0 - }, - "ostree-content-checksum": { - "$id":"#/properties/ostree-content-checksum", - "type":"string", - "title":"ostree-content-checksum", - "default":"", - "minLength": 64 - }, - "ostree-n-cache-hits": { - "$id":"#/properties/ostree-n-cache-hits", - "type":"integer", - "title":"ostree-n-cache-hits", - "default": 0 - }, - "ostree-n-content-total": { - "$id":"#/properties/ostree-n-content-total", - "type":"integer", - "title":"ostree-n-content-total", - "default": 0 - }, - "ostree-n-content-written": { - "$id":"#/properties/ostree-n-content-written", - "type":"integer", - "title":"ostree-n-content-written", - "default": 0 - }, - "ostree-n-metadata-total": { - "$id":"#/properties/ostree-n-metadata-total", - 
"type":"integer", - "title":"ostree-n-metadata-total", - "default": 0 - }, - "ostree-n-metadata-written": { - "$id":"#/properties/ostree-n-metadata-written", - "type":"integer", - "title":"ostree-n-metadata-written", - "default": 0 - }, - "ostree-timestamp": { - "$id":"#/properties/ostree-timestamp", - "type":"string", - "title":"ostree timestamp", - "default":"", - "examples": [ - "2020-01-15T19:31:31Z" - ], - "pattern":"\\d{4}-\\d{2}-\\d{2}T.*Z$" - }, - "ostree-version": { - "$id":"#/properties/ostree-version", - "type":"string", - "title":"ostree version", - "default":"", - "minLength": 1 - }, - "pkgdiff": { - "$id":"#/properties/pkgdiff", - "type":"array", - "title":"pkgdiff between builds", - "$ref": "#/definitions/pkg-items" - }, - "parent-pkgdiff": { - "$id":"#/properties/parent-pkgdiff", - "type":"array", - "title":"pkgdiff against parent", - "$ref": "#/definitions/pkg-items" - }, - "advisories-diff": { - "$id":"#/properties/advisories-diff", - "type":"array", - "title":"advisory diff between builds", - "$ref": "#/definitions/advisory-items" - }, - "parent-advisories-diff": { - "$id":"#/properties/parent-advisory-diff", - "type":"array", - "title":"advisory diff against parent", - "$ref": "#/definitions/advisory-items" - }, - "rpm-ostree-inputhash": { - "$id":"#/properties/rpm-ostree-inputhash", - "type":"string", - "title":"input has of the rpm-ostree", - "default":"", - "minLength": 64 - }, - "summary": { - "$id":"#/properties/summary", - "type":"string", - "title":"Build Summary", - "default":"", - "minLength": 1 - }, - "aliyun": { - "$id":"#/properties/aliyun", - "type":"array", - "title":"Alibaba/Aliyun Uploads", - "items": { - "$id":"#/properties/aliyun/images", - "type":"object", - "title":"Aliyun Image", - "required": [ - "name", - "id" - ], - "properties": { - "name": { - "$id":"#/properties/aliyun/items/properties/name", - "type":"string", - "title":"Region", - "default":"", - "minLength": 1 - }, - "id": { - 
"$id":"#/properties/aliyun/items/properties/id", - "type":"string", - "title":"ImageID", - "default":"", - "minLength": 1 - } - } - } - }, - "amis": { - "$id":"#/properties/amis", - "type":"array", - "title":"AMIS", - "items": { - "$id":"#/properties/amis/items", - "type":"object", - "title":"AMIS", - "required": [ - "name", - "hvm", - "snapshot" - ], - "properties": { - "name": { - "$id":"#/properties/amis/items/properties/name", - "type":"string", - "title":"Region", - "default":"" - }, - "hvm": { - "$id":"#/properties/amis/items/properties/hvm", - "type":"string", - "title":"HVM", - "default":"" - }, - "snapshot": { - "$id":"#/properties/amis/items/properties/snapshot", - "type":"string", - "title":"Snapshot", - "default":"" - } - } - } - }, - "azure": { - "$id":"#/properties/azure", - "type":"object", - "title":"Azure", - "$ref": "#/definitions/cloudartifact" - }, - "gcp": { - "$id":"#/properties/gcp", - "type":"object", - "title":"GCP", - "required": [ - "image", - "url" - ], - "optional": [ - "family", - "project" - ], - "properties": { - "image": { - "$id":"#/properties/gcp/image", - "type":"string", - "title":"Image Name" - }, - "url": { - "$id":"#/properties/gcp/url", - "type":"string", - "title":"URL" - }, - "project": { - "$id":"#/properties/gcp/project", - "type":"string", - "title":"Image Project" - }, - "family": { - "$id":"#/properties/gcp/family", - "type":"string", - "title":"Image Family" - } - } - }, - "ibmcloud": { - "$id":"#/properties/ibmcloud", - "type":"array", - "title":"IBM Cloud", - "items": { - "type":"object", - "$ref": "#/definitions/cloudartifact" - } - }, - "powervs": { - "$id":"#/properties/powervs", - "type":"array", - "title":"Power Virtual Server", - "items": { - "type":"object", - "$ref": "#/definitions/cloudartifact" - } - }, - "release-payload": { - "$id":"#/properties/release-payload", - "type":"object", - "title":"ReleasePayload", - "$ref": "#/definitions/image" - } - } -} -` diff --git 
a/mantle/vendor/github.com/minio/sha256-simd/LICENSE b/mantle/vendor/github.com/coreos/coreos-assembler/LICENSE similarity index 99% rename from mantle/vendor/github.com/minio/sha256-simd/LICENSE rename to mantle/vendor/github.com/coreos/coreos-assembler/LICENSE index d6456956..e06d2081 100644 --- a/mantle/vendor/github.com/minio/sha256-simd/LICENSE +++ b/mantle/vendor/github.com/coreos/coreos-assembler/LICENSE @@ -1,5 +1,4 @@ - - Apache License +Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -187,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ diff --git a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/build.go b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/build.go similarity index 88% rename from mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/build.go rename to mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/build.go index 5e7cd1a8..bd5ba408 100644 --- a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/build.go +++ b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/build.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package cosa +package builds import ( "encoding/json" @@ -24,12 +24,12 @@ import ( "path/filepath" "reflect" "regexp" - "runtime" "sort" "strings" "github.com/pkg/errors" log "github.com/sirupsen/logrus" + coreosarch "github.com/coreos/stream-metadata-go/arch" ) var ( @@ -51,6 +51,11 @@ const ( CosaMetaJSON = "meta.json" ) +type objectInfo struct { + info os.FileInfo + name string +} + // SetArch overrides the build arch func SetArch(a string) { forceArch = a @@ -62,11 +67,26 @@ func BuilderArch() string { if forceArch != "" { return forceArch } - arch := runtime.GOARCH - if arch == "amd64" { - arch = "x86_64" - } - return arch + return coreosarch.CurrentRpmArch() +} + +// defaultWalkFunc walks over a directory and returns a channel of os.FileInfo +func walkFn(p string) <-chan *objectInfo { + ret := make(chan *objectInfo) + go func() { + defer close(ret) //nolint + _ = filepath.Walk(p, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + ret <- &objectInfo{ + name: filepath.Join(p, info.Name()), + info: info, + } + return nil + }) + }() + return ret } // ReadBuild returns a build upon finding a meta.json. 
Returns a Build, the path string @@ -93,7 +113,7 @@ func ReadBuild(dir, buildID, arch string) (*Build, string, error) { } p := filepath.Join(dir, buildID, arch) - f, err := Open(filepath.Join(p, CosaMetaJSON)) + f, err := os.Open(filepath.Join(p, CosaMetaJSON)) if err != nil { return nil, "", fmt.Errorf("failed to open %s to read meta.json: %w", p, err) } @@ -113,14 +133,14 @@ func ReadBuild(dir, buildID, arch string) (*Build, string, error) { if !ok { break } - if fi == nil || fi.IsDir() || fi.Name() == CosaMetaJSON { + if fi == nil || fi.info.IsDir() || fi.info.Name() == CosaMetaJSON { continue } - if !IsMetaJSON(fi.Name()) { + if !IsMetaJSON(fi.info.Name()) { continue } - log.WithField("extra meta.json", fi.Name()).Info("found meta") - f, err := Open(filepath.Join(p, fi.Name())) + log.WithField("extra meta.json", fi.name).Info("found meta") + f, err := os.Open(filepath.Join(p, fi.info.Name())) if err != nil { return b, p, err } @@ -146,7 +166,7 @@ func buildParser(r io.Reader) (*Build, error) { // ParseBuild parses the meta.json and reutrns a build func ParseBuild(path string) (*Build, error) { - f, err := Open(path) + f, err := os.Open(path) if err != nil { return nil, errors.Wrapf(err, "failed to open %s", path) } @@ -178,7 +198,7 @@ func (build *Build) GetArtifact(artifact string) (*Artifact, error) { if ok && r.Path != "" { return r, nil } - return nil, errors.New("artifact not defined") + return nil, errors.New("artifact " + artifact + " not defined") } // IsArtifact takes a path and returns the artifact type and a bool if @@ -305,6 +325,10 @@ func FetchAndParseBuild(url string) (*Build, error) { return nil, err } defer res.Body.Close() + if res.StatusCode != 200 { + return nil, fmt.Errorf( + "Received a %d error in http response for: %s", res.StatusCode, url) + } return buildParser(res.Body) } diff --git a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/builds.go b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/builds.go 
similarity index 96% rename from mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/builds.go rename to mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/builds.go index 31f50c5c..d0fcef1d 100644 --- a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/builds.go +++ b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/builds.go @@ -1,9 +1,10 @@ -package cosa +package builds import ( "bytes" "encoding/json" "io" + "os" "path/filepath" "github.com/pkg/errors" @@ -35,7 +36,7 @@ type buildsJSON struct { func getBuilds(dir string) (*buildsJSON, error) { path := filepath.Join(dir, CosaBuildsJSON) - f, err := Open(path) + f, err := os.Open(path) if err != nil { return nil, ErrNoBuildsFound } diff --git a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/cosa_v1.go b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/cosa_v1.go similarity index 67% rename from mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/cosa_v1.go rename to mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/cosa_v1.go index 1b9875a8..3e758a24 100644 --- a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/cosa_v1.go +++ b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/cosa_v1.go @@ -1,6 +1,7 @@ -package cosa +package builds // generated by 'make schema' +// source hash: ce57421ef973f926c59065fd4eb122d5d22fe76681f952376091631178ce5dc1 type AdvisoryDiff []AdvisoryDiffItems @@ -33,13 +34,15 @@ type Build struct { Amis []Amis `json:"amis,omitempty"` Architecture string `json:"coreos-assembler.basearch,omitempty"` Azure *Cloudartifact `json:"azure,omitempty"` + BaseOsContainer *Image `json:"base-oscontainer,omitempty"` BuildArtifacts *BuildArtifacts `json:"images,omitempty"` BuildID string `json:"buildid"` BuildRef string `json:"ref,omitempty"` - BuildSummary string `json:"summary"` + BuildSummary string `json:"summary,omitempty"` BuildTimeStamp string `json:"coreos-assembler.build-timestamp,omitempty"` 
BuildURL string `json:"build-url,omitempty"` ConfigGitRev string `json:"coreos-assembler.config-gitrev,omitempty"` + ConfigVariant string `json:"coreos-assembler.config-variant,omitempty"` ContainerConfigGit *Git `json:"coreos-assembler.container-config-git,omitempty"` CoreOsSource string `json:"coreos-assembler.code-source,omitempty"` CosaContainerImageGit *Git `json:"coreos-assembler.container-image-git,omitempty"` @@ -47,6 +50,7 @@ type Build struct { CosaImageChecksum string `json:"coreos-assembler.image-config-checksum,omitempty"` CosaImageVersion int `json:"coreos-assembler.image-genver,omitempty"` Extensions *Extensions `json:"extensions,omitempty"` + ExtensionsContainer *Image `json:"extensions-container,omitempty"` FedoraCoreOsParentCommit string `json:"fedora-coreos.parent-commit,omitempty"` FedoraCoreOsParentVersion string `json:"fedora-coreos.parent-version,omitempty"` Gcp *Gcp `json:"gcp,omitempty"` @@ -55,6 +59,7 @@ type Build struct { ImageInputChecksum string `json:"coreos-assembler.image-input-checksum,omitempty"` InputHasOfTheRpmOstree string `json:"rpm-ostree-inputhash"` Koji *Koji `json:"koji,omitempty"` + KubeVirt *KubeVirt `json:"kubevirt,omitempty"` MetaStamp float64 `json:"coreos-assembler.meta-stamp,omitempty"` Name string `json:"name"` Oscontainer *Image `json:"oscontainer,omitempty"` @@ -73,34 +78,41 @@ type Build struct { PkgdiffBetweenBuilds PackageSetDifferences `json:"pkgdiff,omitempty"` PowerVirtualServer []Cloudartifact `json:"powervs,omitempty"` ReleasePayload *Image `json:"release-payload,omitempty"` + S3 *S3 `json:"s3,omitempty"` + YumReposGit *Git `json:"coreos-assembler.yumrepos-git,omitempty"` } type BuildArtifacts struct { - Aliyun *Artifact `json:"aliyun,omitempty"` - Aws *Artifact `json:"aws,omitempty"` - Azure *Artifact `json:"azure,omitempty"` - AzureStack *Artifact `json:"azurestack,omitempty"` - Dasd *Artifact `json:"dasd,omitempty"` - DigitalOcean *Artifact `json:"digitalocean,omitempty"` - Exoscale *Artifact 
`json:"exoscale,omitempty"` - Gcp *Artifact `json:"gcp,omitempty"` - IbmCloud *Artifact `json:"ibmcloud,omitempty"` - Initramfs *Artifact `json:"initramfs,omitempty"` - Iso *Artifact `json:"iso,omitempty"` - Kernel *Artifact `json:"kernel,omitempty"` - LiveInitramfs *Artifact `json:"live-initramfs,omitempty"` - LiveIso *Artifact `json:"live-iso,omitempty"` - LiveKernel *Artifact `json:"live-kernel,omitempty"` - LiveRootfs *Artifact `json:"live-rootfs,omitempty"` - Metal *Artifact `json:"metal,omitempty"` - Metal4KNative *Artifact `json:"metal4k,omitempty"` - Nutanix *Artifact `json:"nutanix,omitempty"` - OpenStack *Artifact `json:"openstack,omitempty"` - Ostree Artifact `json:"ostree"` - PowerVirtualServer *Artifact `json:"powervs,omitempty"` - Qemu *Artifact `json:"qemu,omitempty"` - Vmware *Artifact `json:"vmware,omitempty"` - Vultr *Artifact `json:"vultr,omitempty"` + Aliyun *Artifact `json:"aliyun,omitempty"` + Aws *Artifact `json:"aws,omitempty"` + Azure *Artifact `json:"azure,omitempty"` + AzureStack *Artifact `json:"azurestack,omitempty"` + Dasd *Artifact `json:"dasd,omitempty"` + DigitalOcean *Artifact `json:"digitalocean,omitempty"` + Exoscale *Artifact `json:"exoscale,omitempty"` + ExtensionsContainer *Artifact `json:"extensions-container,omitempty"` + Gcp *Artifact `json:"gcp,omitempty"` + IbmCloud *Artifact `json:"ibmcloud,omitempty"` + Initramfs *Artifact `json:"initramfs,omitempty"` + Iso *Artifact `json:"iso,omitempty"` + Kernel *Artifact `json:"kernel,omitempty"` + KubeVirt *Artifact `json:"kubevirt,omitempty"` + LegacyOscontainer *Artifact `json:"legacy-oscontainer,omitempty"` + LiveInitramfs *Artifact `json:"live-initramfs,omitempty"` + LiveIso *Artifact `json:"live-iso,omitempty"` + LiveKernel *Artifact `json:"live-kernel,omitempty"` + LiveRootfs *Artifact `json:"live-rootfs,omitempty"` + Metal *Artifact `json:"metal,omitempty"` + Metal4KNative *Artifact `json:"metal4k,omitempty"` + Nutanix *Artifact `json:"nutanix,omitempty"` + OpenStack 
*Artifact `json:"openstack,omitempty"` + Ostree Artifact `json:"ostree"` + PowerVirtualServer *Artifact `json:"powervs,omitempty"` + Qemu *Artifact `json:"qemu,omitempty"` + SecureExecutionQemu *Artifact `json:"qemu-secex,omitempty"` + VirtualBox *Artifact `json:"virtualbox,omitempty"` + Vmware *Artifact `json:"vmware,omitempty"` + Vultr *Artifact `json:"vultr,omitempty"` } type Cloudartifact struct { @@ -134,8 +146,9 @@ type Git struct { type Image struct { Comment string `json:"comment,omitempty"` - Digest string `json:"digest"` + Digest string `json:"digest,omitempty"` Image string `json:"image"` + Tags []Tag `json:"tags,omitempty"` } type Koji struct { @@ -144,6 +157,18 @@ type Koji struct { KojiToken string `json:"token,omitempty"` } +type KubeVirt struct { + Image string `json:"image"` +} + type PackageSetDifferences []PackageSetDifferencesItems type PackageSetDifferencesItems interface{} + +type S3 struct { + Bucket string `json:"bucket,omitempty"` + Key string `json:"key,omitempty"` + PublicURL string `json:"public-url,omitempty"` +} + +type Tag string diff --git a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/schema.go b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/schema.go similarity index 99% rename from mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/schema.go rename to mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/schema.go index c712e678..9a37cf56 100644 --- a/mantle/vendor/github.com/coreos/coreos-assembler-schema/cosa/schema.go +++ b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/schema.go @@ -1,4 +1,4 @@ -package cosa +package builds import ( "encoding/json" diff --git a/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/schema_doc.go b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/schema_doc.go new file mode 100644 index 00000000..2c5aecc4 --- /dev/null +++ b/mantle/vendor/github.com/coreos/coreos-assembler/pkg/builds/schema_doc.go @@ -0,0 +1,980 @@ +// 
Generated by ./generate-schema.sh +// Source hash: ce57421ef973f926c59065fd4eb122d5d22fe76681f952376091631178ce5dc1 +// DO NOT EDIT + +package builds + +var generatedSchemaJSON = `{ + "definitions": { + "artifact": { + "type": "object", + "properties": { + "path": { + "$id": "#/artifact/Path", + "type": "string", + "title": "Path" + }, + "sha256": { + "$id": "#/artifact/sha256", + "type": "string", + "title": "SHA256" + }, + "size": { + "$id": "#/artifact/size", + "type": "number", + "title": "Size in bytes" + }, + "skip-compression": { + "$id": "#/artifact/skip-compression", + "type": "boolean", + "title": "Skip compression", + "description": "Artifact should not be compressed or decompressed before use", + "default": false + }, + "uncompressed-sha256": { + "$id": "#/artifact/uncompressed-sha256", + "type": "string", + "title": "Uncompressed SHA256" + }, + "uncompressed-size": { + "$id": "#/artifact/uncompressed-size", + "type": "integer", + "title": "Uncompressed-size" + } + }, + "optional": [ + "size", + "uncompressed-sha256", + "uncompressed-size", + "skip-compression" + ], + "required": [ + "path", + "sha256" + ] + }, + "image": { + "type": "object", + "required": [ + "image" + ], + "optional": [ + "digest", + "tags", + "comment" + ], + "properties": { + "digest": { + "$id": "#/image/digest", + "type": "string", + "title": "Digest" + }, + "comment": { + "$id": "#/image/comment", + "type": "string", + "title": "Comment" + }, + "image": { + "$id": "#/image/image", + "type": "string", + "title": "Image" + }, + "tags": { + "$id": "#/image/tags", + "type": "array", + "title": "Tags", + "items": { + "$id": "#/image/tags/item", + "title": "Tag", + "type": "string" + } + } + } + }, + "cloudartifact": { + "type": "object", + "required": [ + "url" + ], + "optional": [ + "image", + "object", + "bucket", + "region" + ], + "properties": { + "image": { + "$id": "#/cloudartifact/image", + "type": "string", + "title": "Image" + }, + "url": { + "$id": "#/cloudartifact/url", + 
"type": "string", + "title": "URL" + }, + "bucket": { + "$id": "#/cloudartifact/bucket", + "type": "string", + "title": "Bucket" + }, + "region": { + "$id": "#/cloudartifact/region", + "type": "string", + "title": "Region" + }, + "object": { + "$id": "#/cloudartifact/object", + "type": "string", + "title": "Object" + } + } + }, + "git": { + "type": "object", + "required": [ + "commit", + "origin" + ], + "optional": [ + "branch", + "dirty" + ], + "properties": { + "branch": { + "$id": "#/git/branch", + "type": "string", + "title": "branch", + "default": "", + "examples": [ + "HEAD" + ], + "minLength": 3 + }, + "commit": { + "$id": "#/git/commit", + "type": "string", + "title": "commit", + "default": "", + "examples": [ + "742edc307e58f35824d906958b6493510e12b593" + ], + "minLength": 5 + }, + "dirty": { + "$id": "#/git/dirty", + "type": "string", + "title": "dirty", + "default": "", + "examples": [ + "true" + ], + "minLength": 1 + }, + "origin": { + "$id": "#/git/origin", + "type": "string", + "title": "origin", + "default": "", + "examples": [ + "https://github.com/coreos/fedora-coreos-config" + ], + "minLength": 1 + } + } + }, + "pkg-items": { + "type": "array", + "title": "Package Set differences", + "items": { + "$id": "#/pkgdiff/items/item", + "title": "Items", + "default": "", + "minLength": 1 + } + }, + "advisory-items": { + "type": "array", + "title": "Advisory diff", + "items": { + "$id": "#/advisory-diff/items/item", + "title": "Items", + "default": "" + } + } + }, + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://github.com/coreos/coreos-assembler/blob/main/v1.json.json", + "type": "object", + "title": "CoreOS Assember v1 meta.json schema", + "required": [ + "buildid", + "name", + "ostree-commit", + "ostree-content-checksum", + "ostree-timestamp", + "ostree-version", + "rpm-ostree-inputhash" + ], + "optional": [ + "aliyun", + "amis", + "azure", + "azurestack", + "base-oscontainer", + "build-url", + "digitalocean", + "exoscale", + 
"gcp", + "kubevirt", + "ibmcloud", + "powervs", + "images", + "koji", + "oscontainer", + "extensions", + "extensions-container", + "parent-pkgdiff", + "pkgdiff", + "parent-advisories-diff", + "advisories-diff", + "release-payload", + "summary", + "s3", + "coreos-assembler.basearch", + "coreos-assembler.build-timestamp", + "coreos-assembler.code-source", + "coreos-assembler.config-dirty", + "coreos-assembler.config-gitrev", + "coreos-assembler.config-variant", + "coreos-assembler.container-config-git", + "coreos-assembler.container-image-git", + "coreos-assembler.delayed-meta-merge", + "coreos-assembler.image-config-checksum", + "coreos-assembler.image-genver", + "coreos-assembler.image-input-checksum", + "coreos-assembler.meta-stamp", + "coreos-assembler.overrides-active", + "coreos-assembler.yumrepos-git", + "fedora-coreos.parent-commit", + "fedora-coreos.parent-version", + "ref" + ], + "additionalProperties": false, + "properties": { + "ref": { + "$id": "#/properties/ref", + "type": "string", + "title": "BuildRef", + "default": "", + "minLength": 1 + }, + "build-url": { + "$id": "#/properties/build-url", + "type": "string", + "title": "Build URL", + "default": "", + "minLength": 1 + }, + "buildid": { + "$id": "#/properties/buildid", + "type": "string", + "title": "BuildID", + "default": "", + "minLength": 1 + }, + "s3": { + "type": "object", + "properties": { + "bucket": { + "$id": "#/properties/bucket", + "type": "string", + "title": "Bucket" + }, + "key": { + "$id": "#/properties/key", + "type": "string", + "title": "Key" + }, + "public-url": { + "$id": "#/properties/public-url", + "type": "string", + "title": "Public URL" + } + } + }, + "koji": { + "type": "object", + "properties": { + "build_id": { + "$id": "#/properties/kojiid", + "type": "number", + "title": "Koji Build ID" + }, + "token": { + "$id": "#/properties/kojitoken", + "type": "string", + "title": "Koji Token" + }, + "release": { + "$id": "#/properties/buildrelease", + "type": "string", + "title": 
"Build Release" + } + } + }, + "coreos-assembler.basearch": { + "$id": "#/properties/coreos-assembler.basearch", + "type": "string", + "title": "Architecture", + "default": "", + "minLength": 1 + }, + "coreos-assembler.build-timestamp": { + "$id": "#/properties/coreos-assembler.build-timestamp", + "type": "string", + "title": "Build Time Stamp", + "default": "", + "minLength": 1 + }, + "coreos-assembler.code-source": { + "$id": "#/properties/coreos-assembler.code-source", + "type": "string", + "title": "CoreOS Source", + "default": "", + "minLength": 1 + }, + "coreos-assembler.config-dirty": { + "$id": "#/properties/coreos-assembler.config-dirty", + "type": "string", + "title": "GitDirty", + "default": "", + "minLength": 1 + }, + "coreos-assembler.config-gitrev": { + "$id": "#/properties/coreos-assembler.config-gitrev", + "type": "string", + "title": "Config GitRev", + "default": "", + "minLength": 1 + }, + "coreos-assembler.config-variant": { + "$id": "#/properties/coreos-assembler.config-variant", + "type": "string", + "title": "Config variant", + "default": "" + }, + "coreos-assembler.container-config-git": { + "$id": "#/properties/coreos-assembler.container-config-git", + "type": "object", + "title": "Container Config Git", + "$ref": "#/definitions/git" + }, + "coreos-assembler.container-image-git": { + "$id": "#/properties/coreos-assembler.container-image-git", + "type": "object", + "title": "COSA Container Image Git", + "$ref": "#/definitions/git" + }, + "coreos-assembler.delayed-meta-merge": { + "$id": "#/properties/coreos-assembler.delayed-meta-merge", + "type": "boolean", + "title": "COSA Delayed Meta Merge", + "default": "False" + }, + "coreos-assembler.yumrepos-git": { + "$id": "#/properties/coreos-assembler.yumrepos-git", + "type": "object", + "title": "YUM repos Git", + "$ref": "#/definitions/git" + }, + "coreos-assembler.meta-stamp": { + "$id": "#/properties/coreos-assembler.meta-stamp", + "type": "number", + "title": "Meta Stamp", + "default": "", + 
"minLength": 16 + }, + "fedora-coreos.parent-version": { + "$id": "#/properties/fedora-coreos.parent-version", + "type": "string", + "title": "Fedora CoreOS Parent Version", + "default": "", + "minLength": 12 + }, + "fedora-coreos.parent-commit": { + "$id": "#/properties/fedora-coreos.parent-commit", + "type": "string", + "title": "Fedora CoreOS parent commit", + "default": "", + "examples": [ + "f15f5b25cf138a7683e3d200c53ece2091bf71d31332135da87892ab72ff4ee3" + ], + "minLength": 64 + }, + "coreos-assembler.image-config-checksum": { + "$id": "#/properties/coreos-assembler.image-config-checksum", + "type": "string", + "title": "COSA image checksum", + "default": "", + "minLength": 64 + }, + "coreos-assembler.image-genver": { + "$id": "#/properties/coreos-assembler.image-genver", + "type": "integer", + "title": "COSA Image Version", + "default": 0, + "examples": [ + 0 + ] + }, + "coreos-assembler.image-input-checksum": { + "$id": "#/properties/coreos-assembler.image-input-checksum", + "type": "string", + "title": "Image input checksum", + "default": "", + "minLength": 64 + }, + "coreos-assembler.overrides-active": { + "$id": "#/properties/coreos-assembler.overrides-active", + "title": "Overrides Active", + "default": "", + "type": "boolean" + }, + "images": { + "$id": "#/properties/images", + "type": "object", + "title": "Build Artifacts", + "required": [ + "ostree" + ], + "optional": [ + "aliyun", + "aws", + "azure", + "azurestack", + "dasd", + "digitalocean", + "exoscale", + "extensions-container", + "legacy-oscontainer", + "gcp", + "kubevirt", + "ibmcloud", + "powervs", + "initramfs", + "iso", + "kernel", + "live-kernel", + "live-initramfs", + "live-iso", + "live-rootfs", + "metal", + "metal4k", + "nutanix", + "openstack", + "qemu", + "virtualbox", + "vmware", + "vultr", + "qemu-secex" + ], + "properties": { + "ostree": { + "$id": "#/properties/images/properties/ostree", + "type": "object", + "title": "OSTree", + "$ref": "#/definitions/artifact" + }, + "dasd": { 
+ "$id": "#/properties/images/properties/dasd", + "type": "object", + "title": "dasd", + "$ref": "#/definitions/artifact" + }, + "exoscale": { + "$id": "#/properties/images/properties/exoscale", + "type": "object", + "title": "exoscale", + "$ref": "#/definitions/artifact" + }, + "extensions-container": { + "$id": "#/properties/images/properties/extensions-container", + "type": "object", + "title": "extensions-container", + "$ref": "#/definitions/artifact" + }, + "legacy-oscontainer": { + "$id": "#/properties/images/properties/legacy-oscontainer", + "type": "object", + "title": "legacy-oscontainer", + "$ref": "#/definitions/artifact" + }, + "qemu": { + "$id": "#/properties/images/properties/qemu", + "type": "object", + "title": "Qemu", + "$ref": "#/definitions/artifact" + }, + "qemu-secex": { + "$id": "#/properties/images/properties/secex", + "type": "object", + "title": "Secure Execution Qemu", + "$ref": "#/definitions/artifact" + }, + "metal": { + "$id": "#/properties/images/properties/metal", + "type": "object", + "title": "Metal", + "$ref": "#/definitions/artifact" + }, + "metal4k": { + "$id": "#/properties/images/properties/metal4k", + "type": "object", + "title": "Metal (4K native)", + "$ref": "#/definitions/artifact" + }, + "iso": { + "$id": "#/properties/images/properties/iso", + "type": "object", + "title": "ISO", + "$ref": "#/definitions/artifact" + }, + "kernel": { + "$id": "#/properties/images/properties/kernel", + "type": "object", + "title": "Kernel", + "$ref": "#/definitions/artifact" + }, + "initramfs": { + "$id": "#/properties/images/properties/initramfs", + "type": "object", + "title": "Initramfs", + "$ref": "#/definitions/artifact" + }, + "live-kernel": { + "$id": "#/properties/images/properties/live-kernel", + "type": "object", + "title": "Live Kernel", + "$ref": "#/definitions/artifact" + }, + "live-initramfs": { + "$id": "#/properties/images/properties/live-initramfs", + "type": "object", + "title": "Live Initramfs", + "$ref": 
"#/definitions/artifact" + }, + "live-iso": { + "$id": "#/properties/images/properties/live-iso", + "type": "object", + "title": "Live ISO", + "$ref": "#/definitions/artifact" + }, + "live-rootfs": { + "$id": "#/properties/images/properties/live-rootfs", + "type": "object", + "title": "Live Rootfs", + "$ref": "#/definitions/artifact" + }, + "nutanix": { + "$id": "#/properties/images/properties/nutanix", + "type": "object", + "title": "Nutanix", + "$ref": "#/definitions/artifact" + }, + "openstack": { + "$id": "#/properties/images/properties/openstack", + "type": "object", + "title": "OpenStack", + "$ref": "#/definitions/artifact" + }, + "virtualbox": { + "$id": "#/properties/images/properties/virtualbox", + "type": "object", + "title": "VirtualBox", + "$ref": "#/definitions/artifact" + }, + "vmware": { + "$id": "#/properties/images/properties/vmware", + "type": "object", + "title": "VMWare", + "$ref": "#/definitions/artifact" + }, + "vultr": { + "$id": "#/properties/images/properties/vultr", + "type": "object", + "title": "Vultr", + "$ref": "#/definitions/artifact" + }, + "aliyun": { + "$id": "#/properties/images/properties/aliyun", + "type": "object", + "title": "Aliyun", + "$ref": "#/definitions/artifact" + }, + "aws": { + "$id": "#/properties/images/properties/aws", + "type": "object", + "title": "AWS", + "$ref": "#/definitions/artifact" + }, + "azure": { + "$id": "#/properties/images/properties/azure", + "type": "object", + "title": "Azure", + "$ref": "#/definitions/artifact" + }, + "azurestack": { + "$id": "#/properties/images/properties/azurestack", + "type": "object", + "title": "AzureStack", + "$ref": "#/definitions/artifact" + }, + "digitalocean": { + "$id": "#/properties/images/properties/digitalocean", + "type": "object", + "title": "DigitalOcean", + "$ref": "#/definitions/artifact" + }, + "ibmcloud": { + "$id": "#/properties/images/properties/ibmcloud", + "type": "object", + "title": "IBM Cloud", + "$ref": "#/definitions/artifact" + }, + "powervs": { + 
"$id": "#/properties/images/properties/powervs", + "type": "object", + "title": "Power Virtual Server", + "$ref": "#/definitions/artifact" + }, + "gcp": { + "$id": "#/properties/images/properties/gcp", + "type": "object", + "title": "GCP", + "$ref": "#/definitions/artifact" + }, + "kubevirt": { + "$id": "#/properties/images/properties/kubevirt", + "type": "object", + "title": "KubeVirt", + "$ref": "#/definitions/artifact" + } + } + }, + "name": { + "$id": "#/properties/name", + "type": "string", + "title": "Name", + "default": "fedora-coreos", + "examples": [ + "rhcos", + "fedora-coreos" + ] + }, + "oscontainer": { + "$id": "#/properties/oscontainer", + "type": "object", + "title": "Oscontainer", + "$ref": "#/definitions/image" + }, + "extensions": { + "$id": "#/properties/extensions", + "type": "object", + "title": "Extensions", + "required": [ + "path", + "sha256", + "rpm-ostree-state", + "manifest" + ], + "properties": { + "path": { + "$id": "#/artifact/Path", + "type": "string", + "title": "Path" + }, + "sha256": { + "$id": "#/artifact/sha256", + "type": "string", + "title": "SHA256" + }, + "rpm-ostree-state": { + "$id": "#/properties/extensions/items/properties/rpm-ostree-state", + "type": "string", + "title": "RpmOstreeState", + "default": "", + "minLength": 64 + }, + "manifest": { + "$id": "#/properties/extensions/items/properties/manifest", + "type": "object", + "title": "Manifest" + } + } + }, + "ostree-commit": { + "$id": "#/properties/ostree-commit", + "type": "string", + "title": "ostree-commit", + "default": "", + "minLength": 64 + }, + "ostree-content-bytes-written": { + "$id": "#/properties/ostree-content-bytes-written", + "type": "integer", + "title": "ostree-content-bytes-written", + "default": 0 + }, + "ostree-content-checksum": { + "$id": "#/properties/ostree-content-checksum", + "type": "string", + "title": "ostree-content-checksum", + "default": "", + "minLength": 64 + }, + "ostree-n-cache-hits": { + "$id": "#/properties/ostree-n-cache-hits", + 
"type": "integer", + "title": "ostree-n-cache-hits", + "default": 0 + }, + "ostree-n-content-total": { + "$id": "#/properties/ostree-n-content-total", + "type": "integer", + "title": "ostree-n-content-total", + "default": 0 + }, + "ostree-n-content-written": { + "$id": "#/properties/ostree-n-content-written", + "type": "integer", + "title": "ostree-n-content-written", + "default": 0 + }, + "ostree-n-metadata-total": { + "$id": "#/properties/ostree-n-metadata-total", + "type": "integer", + "title": "ostree-n-metadata-total", + "default": 0 + }, + "ostree-n-metadata-written": { + "$id": "#/properties/ostree-n-metadata-written", + "type": "integer", + "title": "ostree-n-metadata-written", + "default": 0 + }, + "ostree-timestamp": { + "$id": "#/properties/ostree-timestamp", + "type": "string", + "title": "ostree timestamp", + "default": "", + "examples": [ + "2020-01-15T19:31:31Z" + ], + "pattern": "\\d{4}-\\d{2}-\\d{2}T.*Z$" + }, + "ostree-version": { + "$id": "#/properties/ostree-version", + "type": "string", + "title": "ostree version", + "default": "", + "minLength": 1 + }, + "pkgdiff": { + "$id": "#/properties/pkgdiff", + "type": "array", + "title": "pkgdiff between builds", + "$ref": "#/definitions/pkg-items" + }, + "parent-pkgdiff": { + "$id": "#/properties/parent-pkgdiff", + "type": "array", + "title": "pkgdiff against parent", + "$ref": "#/definitions/pkg-items" + }, + "advisories-diff": { + "$id": "#/properties/advisories-diff", + "type": "array", + "title": "advisory diff between builds", + "$ref": "#/definitions/advisory-items" + }, + "parent-advisories-diff": { + "$id": "#/properties/parent-advisory-diff", + "type": "array", + "title": "advisory diff against parent", + "$ref": "#/definitions/advisory-items" + }, + "rpm-ostree-inputhash": { + "$id": "#/properties/rpm-ostree-inputhash", + "type": "string", + "title": "input hash of the rpm-ostree", + "default": "", + "minLength": 64 + }, + "summary": { + "$id": "#/properties/summary", + "type": "string", + 
"title": "Build Summary", + "default": "", + "minLength": 1 + }, + "aliyun": { + "$id": "#/properties/aliyun", + "type": "array", + "title": "Alibaba/Aliyun Uploads", + "items": { + "$id": "#/properties/aliyun/images", + "type": "object", + "title": "Aliyun Image", + "required": [ + "name", + "id" + ], + "properties": { + "name": { + "$id": "#/properties/aliyun/items/properties/name", + "type": "string", + "title": "Region", + "default": "", + "minLength": 1 + }, + "id": { + "$id": "#/properties/aliyun/items/properties/id", + "type": "string", + "title": "ImageID", + "default": "", + "minLength": 1 + } + } + } + }, + "amis": { + "$id": "#/properties/amis", + "type": "array", + "title": "AMIS", + "items": { + "$id": "#/properties/amis/items", + "type": "object", + "title": "AMIS", + "required": [ + "name", + "hvm", + "snapshot" + ], + "properties": { + "name": { + "$id": "#/properties/amis/items/properties/name", + "type": "string", + "title": "Region", + "default": "" + }, + "hvm": { + "$id": "#/properties/amis/items/properties/hvm", + "type": "string", + "title": "HVM", + "default": "" + }, + "snapshot": { + "$id": "#/properties/amis/items/properties/snapshot", + "type": "string", + "title": "Snapshot", + "default": "" + } + } + } + }, + "azure": { + "$id": "#/properties/azure", + "type": "object", + "title": "Azure", + "$ref": "#/definitions/cloudartifact" + }, + "base-oscontainer": { + "$id": "#/properties/base-oscontainer", + "type": "object", + "title": "Base OS container", + "$ref": "#/definitions/image" + }, + "extensions-container": { + "$id": "#/properties/extensions-container", + "type": "object", + "title": "Extensions container", + "$ref": "#/definitions/image" + }, + "gcp": { + "$id": "#/properties/gcp", + "type": "object", + "title": "GCP", + "required": [ + "image", + "url" + ], + "optional": [ + "family", + "project" + ], + "properties": { + "image": { + "$id": "#/properties/gcp/image", + "type": "string", + "title": "Image Name" + }, + "url": { + 
"$id": "#/properties/gcp/url", + "type": "string", + "title": "URL" + }, + "project": { + "$id": "#/properties/gcp/project", + "type": "string", + "title": "Image Project" + }, + "family": { + "$id": "#/properties/gcp/family", + "type": "string", + "title": "Image Family" + } + } + }, + "kubevirt": { + "$id": "#/properties/kubevirt", + "type": "object", + "title": "KubeVirt", + "required": [ + "image" + ], + "properties": { + "image": { + "$id": "#/properties/kubevirt/image", + "type": "string", + "title": "Image" + } + } + }, + "ibmcloud": { + "$id": "#/properties/ibmcloud", + "type": "array", + "title": "IBM Cloud", + "items": { + "type": "object", + "$ref": "#/definitions/cloudartifact" + } + }, + "powervs": { + "$id": "#/properties/powervs", + "type": "array", + "title": "Power Virtual Server", + "items": { + "type": "object", + "$ref": "#/definitions/cloudartifact" + } + }, + "release-payload": { + "$id": "#/properties/release-payload", + "type": "object", + "title": "ReleasePayload", + "$ref": "#/definitions/image" + } + } +} +` diff --git a/mantle/vendor/github.com/coreos/go-json/decode.go b/mantle/vendor/github.com/coreos/go-json/decode.go index 1966dcdf..b6f0df08 100644 --- a/mantle/vendor/github.com/coreos/go-json/decode.go +++ b/mantle/vendor/github.com/coreos/go-json/decode.go @@ -229,16 +229,19 @@ func (n Number) Int64() (int64, error) { return strconv.ParseInt(string(n), 10, 64) } +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + // decodeState represents the state while decoding a JSON value. 
type decodeState struct { - data []byte - off int // next read offset in data - opcode int // last read result - scan scanner - errorContext struct { // provides context for type errors - Struct reflect.Type - FieldStack []string - } + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext savedError error useNumber bool disallowUnknownFields bool @@ -258,10 +261,11 @@ func (d *decodeState) init(data []byte) *decodeState { d.data = data d.off = 0 d.savedError = nil - d.errorContext.Struct = nil - - // Reuse the allocated space for the FieldStack slice. - d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } return d } @@ -275,12 +279,11 @@ func (d *decodeState) saveError(err error) { // addErrorContext returns a new error enhanced with information from d.errorContext func (d *decodeState) addErrorContext(err error) error { - if d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0 { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() err.Field = strings.Join(d.errorContext.FieldStack, ".") - return err } } return err @@ -695,7 +698,10 @@ func (d *decodeState) object(v reflect.Value) error { } var mapElem reflect.Value - origErrorContext := d.errorContext + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } for { // Read opening " of string key or closing }. 
@@ -770,6 +776,9 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Field(i) } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t } else if d.disallowUnknownFields { @@ -850,11 +859,13 @@ func (d *decodeState) object(v reflect.Value) error { if d.opcode == scanSkipSpace { d.scanWhile(scanSkipSpace) } - // Reset errorContext to its original state. - // Keep the same underlying array for FieldStack, to reuse the - // space and avoid unnecessary allocs. - d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] - d.errorContext.Struct = origErrorContext.Struct + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } if d.opcode == scanEndObject { break } diff --git a/mantle/vendor/github.com/coreos/go-json/encode.go b/mantle/vendor/github.com/coreos/go-json/encode.go index 578d5511..e473e615 100644 --- a/mantle/vendor/github.com/coreos/go-json/encode.go +++ b/mantle/vendor/github.com/coreos/go-json/encode.go @@ -236,6 +236,8 @@ func (e *UnsupportedTypeError) Error() string { return "json: unsupported type: " + e.Type.String() } +// An UnsupportedValueError is returned by Marshal when attempting +// to encode an unsupported value. type UnsupportedValueError struct { Value reflect.Value Str string @@ -779,28 +781,40 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. 
+ ptr := v.Pointer() + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } e.WriteByte('{') // Extract and sort the keys. - keys := v.MapKeys() - sv := make([]reflectWithString, len(keys)) - for i, v := range keys { - sv[i].v = v + sv := make([]reflectWithString, v.Len()) + mi := v.MapRange() + for i := 0; mi.Next(); i++ { + sv[i].k = mi.Key() + sv[i].v = mi.Value() if err := sv[i].resolve(); err != nil { e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error())) } } - sort.Slice(sv, func(i, j int) bool { return sv[i].s < sv[j].s }) + sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks }) for i, kv := range sv { if i > 0 { e.WriteByte(',') } - e.string(kv.s, opts.escapeHTML) + e.string(kv.ks, opts.escapeHTML) e.WriteByte(':') - me.elemEnc(e, v.MapIndex(kv.v), opts) + me.elemEnc(e, kv.v, opts) } e.WriteByte('}') + e.ptrLevel-- } func newMapEncoder(t reflect.Type) encoderFunc { @@ -857,7 +871,23 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + // Here we use a struct to memorize the pointer to the first element of the slice + // and its length. 
+ ptr := struct { + ptr uintptr + len int + }{v.Pointer(), v.Len()} + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } se.arrayEnc(e, v, opts) + e.ptrLevel-- } func newSliceEncoder(t reflect.Type) encoderFunc { @@ -946,7 +976,7 @@ func isValidTag(s string) bool { } for _, c := range s { switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. @@ -968,29 +998,30 @@ func typeByIndex(t reflect.Type, index []int) reflect.Type { } type reflectWithString struct { - v reflect.Value - s string + k reflect.Value + v reflect.Value + ks string } func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() + if w.k.Kind() == reflect.String { + w.ks = w.k.String() return nil } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - if w.v.Kind() == reflect.Ptr && w.v.IsNil() { + if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { + if w.k.Kind() == reflect.Ptr && w.k.IsNil() { return nil } buf, err := tm.MarshalText() - w.s = string(buf) + w.ks = string(buf) return err } - switch w.v.Kind() { + switch w.k.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) + w.ks = strconv.FormatInt(w.k.Int(), 10) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) + w.ks = strconv.FormatUint(w.k.Uint(), 10) return nil } panic("unexpected map key type") @@ -1210,19 +1241,18 @@ func typeFields(t reflect.Type) structFields { // Scan f.typ for fields to include. 
for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) - isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type if t.Kind() == reflect.Ptr { t = t.Elem() } - if isUnexported && t.Kind() != reflect.Struct { + if !sf.IsExported() && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. - } else if isUnexported { + } else if !sf.IsExported() { // Ignore unexported non-embedded fields. continue } diff --git a/mantle/vendor/github.com/coreos/go-json/fuzz.go b/mantle/vendor/github.com/coreos/go-json/fuzz.go index be03f0d7..d3fa2d11 100644 --- a/mantle/vendor/github.com/coreos/go-json/fuzz.go +++ b/mantle/vendor/github.com/coreos/go-json/fuzz.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gofuzz // +build gofuzz package json diff --git a/mantle/vendor/github.com/coreos/go-json/go.mod b/mantle/vendor/github.com/coreos/go-json/go.mod deleted file mode 100644 index 8f9f23f4..00000000 --- a/mantle/vendor/github.com/coreos/go-json/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/coreos/go-json - -go 1.15 diff --git a/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go b/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go index 91584a16..147f756f 100644 --- a/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/dbus.go @@ -16,6 +16,7 @@ package dbus import ( + "context" "encoding/hex" "fmt" "os" @@ -110,51 +111,76 @@ type Conn struct { } } -// New establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. +// Deprecated: use NewWithContext instead. 
func New() (*Conn, error) { - conn, err := NewSystemConnection() + return NewWithContext(context.Background()) +} + +// NewWithContext establishes a connection to any available bus and authenticates. +// Callers should call Close() when done with the connection. +func NewWithContext(ctx context.Context) (*Conn, error) { + conn, err := NewSystemConnectionContext(ctx) if err != nil && os.Geteuid() == 0 { - return NewSystemdConnection() + return NewSystemdConnectionContext(ctx) } return conn, err } -// NewSystemConnection establishes a connection to the system bus and authenticates. -// Callers should call Close() when done with the connection +// Deprecated: use NewSystemConnectionContext instead. func NewSystemConnection() (*Conn, error) { + return NewSystemConnectionContext(context.Background()) +} + +// NewSystemConnectionContext establishes a connection to the system bus and authenticates. +// Callers should call Close() when done with the connection. +func NewSystemConnectionContext(ctx context.Context) (*Conn, error) { return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SystemBusPrivate) + return dbusAuthHelloConnection(ctx, dbus.SystemBusPrivate) }) } -// NewUserConnection establishes a connection to the session bus and +// Deprecated: use NewUserConnectionContext instead. +func NewUserConnection() (*Conn, error) { + return NewUserConnectionContext(context.Background()) +} + +// NewUserConnectionContext establishes a connection to the session bus and // authenticates. This can be used to connect to systemd user instances. // Callers should call Close() when done with the connection. 
-func NewUserConnection() (*Conn, error) { +func NewUserConnectionContext(ctx context.Context) (*Conn, error) { return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SessionBusPrivate) + return dbusAuthHelloConnection(ctx, dbus.SessionBusPrivate) }) } -// NewSystemdConnection establishes a private, direct connection to systemd. +// Deprecated: use NewSystemdConnectionContext instead. +func NewSystemdConnection() (*Conn, error) { + return NewSystemdConnectionContext(context.Background()) +} + +// NewSystemdConnectionContext establishes a private, direct connection to systemd. // This can be used for communicating with systemd without a dbus daemon. // Callers should call Close() when done with the connection. -func NewSystemdConnection() (*Conn, error) { +func NewSystemdConnectionContext(ctx context.Context) (*Conn, error) { return NewConnection(func() (*dbus.Conn, error) { // We skip Hello when talking directly to systemd. - return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) { - return dbus.Dial("unix:path=/run/systemd/private") + return dbusAuthConnection(ctx, func(opts ...dbus.ConnOption) (*dbus.Conn, error) { + return dbus.Dial("unix:path=/run/systemd/private", opts...) }) }) } -// Close closes an established connection +// Close closes an established connection. func (c *Conn) Close() { c.sysconn.Close() c.sigconn.Close() } +// Connected returns whether conn is connected +func (c *Conn) Connected() bool { + return c.sysconn.Connected() && c.sigconn.Connected() +} + // NewConnection establishes a connection to a bus using a caller-supplied function. // This allows connecting to remote buses through a user-supplied mechanism. // The supplied function may be called multiple times, and should return independent connections. 
@@ -192,7 +218,7 @@ func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { // GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager // interface. The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html +// https://developer.gnome.org/glib/unstable/gvariant-text.html. func (c *Conn) GetManagerProperty(prop string) (string, error) { variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) if err != nil { @@ -201,8 +227,8 @@ func (c *Conn) GetManagerProperty(prop string) (string, error) { return variant.String(), nil } -func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus() +func dbusAuthConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := createBus(dbus.WithContext(ctx)) if err != nil { return nil, err } @@ -221,8 +247,8 @@ func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, err return conn, nil } -func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := dbusAuthConnection(createBus) +func dbusAuthHelloConnection(ctx context.Context, createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { + conn, err := dbusAuthConnection(ctx, createBus) if err != nil { return nil, err } diff --git a/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go b/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go index e38659d7..074148cb 100644 --- a/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/dbus/methods.go @@ -15,6 +15,7 @@ package dbus import ( + "context" "errors" "fmt" "path" @@ -23,6 +24,18 @@ import ( "github.com/godbus/dbus/v5" ) +// Who can be used to specify which 
process to kill in the unit via the KillUnitWithTarget API +type Who string + +const ( + // All sends the signal to all processes in the unit + All Who = "all" + // Main sends the signal to the main process of the unit + Main Who = "main" + // Control sends the signal to the control process of the unit + Control Who = "control" +) + func (c *Conn) jobComplete(signal *dbus.Signal) { var id uint32 var job dbus.ObjectPath @@ -38,14 +51,14 @@ func (c *Conn) jobComplete(signal *dbus.Signal) { c.jobListener.Unlock() } -func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { +func (c *Conn) startJob(ctx context.Context, ch chan<- string, job string, args ...interface{}) (int, error) { if ch != nil { c.jobListener.Lock() defer c.jobListener.Unlock() } var p dbus.ObjectPath - err := c.sysobj.Call(job, 0, args...).Store(&p) + err := c.sysobj.CallWithContext(ctx, job, 0, args...).Store(&p) if err != nil { return 0, err } @@ -60,7 +73,12 @@ func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, return jobID, nil } -// StartUnit enqueues a start job and depending jobs, if any (unless otherwise +// Deprecated: use StartUnitContext instead. +func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { + return c.StartUnitContext(context.Background(), name, mode, ch) +} + +// StartUnitContext enqueues a start job and depending jobs, if any (unless otherwise // specified by the mode string). // // Takes the unit to activate, plus a mode string. The mode needs to be one of @@ -90,72 +108,130 @@ func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, // should not be considered authoritative. // // If an error does occur, it will be returned to the user alongside a job ID of 0. 
-func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) +func (c *Conn) StartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) } -// StopUnit is similar to StartUnit but stops the specified unit rather -// than starting it. +// Deprecated: use StopUnitContext instead. func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) + return c.StopUnitContext(context.Background(), name, mode, ch) +} + +// StopUnitContext is similar to StartUnitContext, but stops the specified unit +// rather than starting it. +func (c *Conn) StopUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) } -// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. +// Deprecated: use ReloadUnitContext instead. func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) + return c.ReloadUnitContext(context.Background(), name, mode, ch) } -// RestartUnit restarts a service. If a service is restarted that isn't -// running it will be started. +// ReloadUnitContext reloads a unit. Reloading is done only if the unit +// is already running, and fails otherwise. +func (c *Conn) ReloadUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) +} + +// Deprecated: use RestartUnitContext instead. 
func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) + return c.RestartUnitContext(context.Background(), name, mode, ch) } -// TryRestartUnit is like RestartUnit, except that a service that isn't running -// is not affected by the restart. +// RestartUnitContext restarts a service. If a service is restarted that isn't +// running it will be started. +func (c *Conn) RestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) +} + +// Deprecated: use TryRestartUnitContext instead. func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) + return c.TryRestartUnitContext(context.Background(), name, mode, ch) +} + +// TryRestartUnitContext is like RestartUnitContext, except that a service that +// isn't running is not affected by the restart. +func (c *Conn) TryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) } -// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart -// otherwise. +// Deprecated: use ReloadOrRestartUnitContext instead. func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) + return c.ReloadOrRestartUnitContext(context.Background(), name, mode, ch) } -// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try" -// flavored restart otherwise. +// ReloadOrRestartUnitContext attempts a reload if the unit supports it and use +// a restart otherwise. 
+func (c *Conn) ReloadOrRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) +} + +// Deprecated: use ReloadOrTryRestartUnitContext instead. func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) + return c.ReloadOrTryRestartUnitContext(context.Background(), name, mode, ch) +} + +// ReloadOrTryRestartUnitContext attempts a reload if the unit supports it, +// and use a "Try" flavored restart otherwise. +func (c *Conn) ReloadOrTryRestartUnitContext(ctx context.Context, name string, mode string, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) } -// StartTransientUnit() may be used to create and start a transient unit, which +// Deprecated: use StartTransientUnitContext instead. +func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.StartTransientUnitContext(context.Background(), name, mode, properties, ch) +} + +// StartTransientUnitContext may be used to create and start a transient unit, which // will be released as soon as it is not running or referenced anymore or the // system is rebooted. name is the unit name including suffix, and must be -// unique. mode is the same as in StartUnit(), properties contains properties +// unique. mode is the same as in StartUnitContext, properties contains properties // of the unit. 
-func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) +func (c *Conn) StartTransientUnitContext(ctx context.Context, name string, mode string, properties []Property, ch chan<- string) (int, error) { + return c.startJob(ctx, ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) } -// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's -// processes are killed. +// Deprecated: use KillUnitContext instead. func (c *Conn) KillUnit(name string, signal int32) { - c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() + c.KillUnitContext(context.Background(), name, signal) +} + +// KillUnitContext takes the unit name and a UNIX signal number to send. +// All of the unit's processes are killed. +func (c *Conn) KillUnitContext(ctx context.Context, name string, signal int32) { + c.KillUnitWithTarget(ctx, name, All, signal) } -// ResetFailedUnit resets the "failed" state of a specific unit. +// KillUnitWithTarget is like KillUnitContext, but allows you to specify which +// process in the unit to send the signal to. +func (c *Conn) KillUnitWithTarget(ctx context.Context, name string, target Who, signal int32) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.KillUnit", 0, name, string(target), signal).Store() +} + +// Deprecated: use ResetFailedUnitContext instead. func (c *Conn) ResetFailedUnit(name string) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() + return c.ResetFailedUnitContext(context.Background(), name) +} + +// ResetFailedUnitContext resets the "failed" state of a specific unit. 
+func (c *Conn) ResetFailedUnitContext(ctx context.Context, name string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() } -// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`. +// Deprecated: use SystemStateContext instead. func (c *Conn) SystemState() (*Property, error) { + return c.SystemStateContext(context.Background()) +} + +// SystemStateContext returns the systemd state. Equivalent to +// systemctl is-system-running. +func (c *Conn) SystemStateContext(ctx context.Context) (*Property, error) { var err error var prop dbus.Variant obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") - err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) if err != nil { return nil, err } @@ -163,8 +239,8 @@ func (c *Conn) SystemState() (*Property, error) { return &Property{Name: "SystemState", Value: prop}, nil } -// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface -func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { +// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface. 
+func (c *Conn) getProperties(ctx context.Context, path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { var err error var props map[string]dbus.Variant @@ -173,7 +249,7 @@ func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[st } obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) if err != nil { return nil, err } @@ -186,24 +262,42 @@ func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[st return out, nil } -// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties. +// Deprecated: use GetUnitPropertiesContext instead. func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { + return c.GetUnitPropertiesContext(context.Background(), unit) +} + +// GetUnitPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. +func (c *Conn) GetUnitPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { path := unitPath(unit) - return c.getProperties(path, "org.freedesktop.systemd1.Unit") + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") } -// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties. +// Deprecated: use GetUnitPathPropertiesContext instead. func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { - return c.getProperties(path, "org.freedesktop.systemd1.Unit") + return c.GetUnitPathPropertiesContext(context.Background(), path) } -// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties. +// GetUnitPathPropertiesContext takes the (escaped) unit path and returns all +// of its dbus object properties. 
+func (c *Conn) GetUnitPathPropertiesContext(ctx context.Context, path dbus.ObjectPath) (map[string]interface{}, error) { + return c.getProperties(ctx, path, "org.freedesktop.systemd1.Unit") +} + +// Deprecated: use GetAllPropertiesContext instead. func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { + return c.GetAllPropertiesContext(context.Background(), unit) +} + +// GetAllPropertiesContext takes the (unescaped) unit name and returns all of +// its dbus object properties. +func (c *Conn) GetAllPropertiesContext(ctx context.Context, unit string) (map[string]interface{}, error) { path := unitPath(unit) - return c.getProperties(path, "") + return c.getProperties(ctx, path, "") } -func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { +func (c *Conn) getProperty(ctx context.Context, unit string, dbusInterface string, propertyName string) (*Property, error) { var err error var prop dbus.Variant @@ -213,7 +307,7 @@ func (c *Conn) getProperty(unit string, dbusInterface string, propertyName strin } obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) if err != nil { return nil, err } @@ -221,36 +315,65 @@ func (c *Conn) getProperty(unit string, dbusInterface string, propertyName strin return &Property{Name: propertyName, Value: prop}, nil } +// Deprecated: use GetUnitPropertyContext instead. 
func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) + return c.GetUnitPropertyContext(context.Background(), unit, propertyName) } -// GetServiceProperty returns property for given service name and property name +// GetUnitPropertyContext takes an (unescaped) unit name, and a property name, +// and returns the property value. +func (c *Conn) GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1.Unit", propertyName) +} + +// Deprecated: use GetServicePropertyContext instead. func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { - return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName) + return c.GetServicePropertyContext(context.Background(), service, propertyName) +} + +// GetServiceProperty returns property for given service name and property name. +func (c *Conn) GetServicePropertyContext(ctx context.Context, service string, propertyName string) (*Property, error) { + return c.getProperty(ctx, service, "org.freedesktop.systemd1.Service", propertyName) } -// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. -// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope -// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit +// Deprecated: use GetUnitTypePropertiesContext instead. func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { + return c.GetUnitTypePropertiesContext(context.Background(), unit, unitType) +} + +// GetUnitTypePropertiesContext returns the extra properties for a unit, specific to the unit type. 
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope. +// Returns "dbus.Error: Unknown interface" error if the unitType is not the correct type of the unit. +func (c *Conn) GetUnitTypePropertiesContext(ctx context.Context, unit string, unitType string) (map[string]interface{}, error) { path := unitPath(unit) - return c.getProperties(path, "org.freedesktop.systemd1."+unitType) + return c.getProperties(ctx, path, "org.freedesktop.systemd1."+unitType) +} + +// Deprecated: use SetUnitPropertiesContext instead. +func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { + return c.SetUnitPropertiesContext(context.Background(), name, runtime, properties...) } -// SetUnitProperties() may be used to modify certain unit properties at runtime. +// SetUnitPropertiesContext may be used to modify certain unit properties at runtime. // Not all properties may be changed at runtime, but many resource management // settings (primarily those in systemd.cgroup(5)) may. The changes are applied // instantly, and stored on disk for future boots, unless runtime is true, in which // case the settings only apply until the next reboot. name is the name of the unit // to modify. properties are the settings to set, encoded as an array of property // name and value pairs. -func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() +func (c *Conn) SetUnitPropertiesContext(ctx context.Context, name string, runtime bool, properties ...Property) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() } +// Deprecated: use GetUnitTypePropertyContext instead. 
func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) + return c.GetUnitTypePropertyContext(context.Background(), unit, unitType, propertyName) +} + +// GetUnitTypePropertyContext takes a property name, a unit name, and a unit type, +// and returns a property value. For valid values of unitType, see GetUnitTypePropertiesContext. +func (c *Conn) GetUnitTypePropertyContext(ctx context.Context, unit string, unitType string, propertyName string) (*Property, error) { + return c.getProperty(ctx, unit, "org.freedesktop.systemd1."+unitType, propertyName) } type UnitStatus struct { @@ -294,36 +417,80 @@ func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { return status, nil } -// ListUnits returns an array with all currently loaded units. Note that +// GetUnitByPID returns the unit object path of the unit a process ID +// belongs to. It takes a UNIX PID and returns the object path. The PID must +// refer to an existing system process +func (c *Conn) GetUnitByPID(ctx context.Context, pid uint32) (dbus.ObjectPath, error) { + var result dbus.ObjectPath + + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.GetUnitByPID", 0, pid).Store(&result) + + return result, err +} + +// GetUnitNameByPID returns the name of the unit a process ID belongs to. It +// takes a UNIX PID and returns the object path. The PID must refer to an +// existing system process +func (c *Conn) GetUnitNameByPID(ctx context.Context, pid uint32) (string, error) { + path, err := c.GetUnitByPID(ctx, pid) + if err != nil { + return "", err + } + + return unitName(path), nil +} + +// Deprecated: use ListUnitsContext instead. +func (c *Conn) ListUnits() ([]UnitStatus, error) { + return c.ListUnitsContext(context.Background()) +} + +// ListUnitsContext returns an array with all currently loaded units. 
Note that // units may be known by multiple names at the same time, and hence there might // be more unit names loaded than actual units behind them. // Also note that a unit is only loaded if it is active and/or enabled. // Units that are both disabled and inactive will thus not be returned. -func (c *Conn) ListUnits() ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) +func (c *Conn) ListUnitsContext(ctx context.Context) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnits", 0).Store) } -// ListUnitsFiltered returns an array with units filtered by state. -// It takes a list of units' statuses to filter. +// Deprecated: use ListUnitsFilteredContext instead. func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) + return c.ListUnitsFilteredContext(context.Background(), states) +} + +// ListUnitsFilteredContext returns an array with units filtered by state. +// It takes a list of units' statuses to filter. +func (c *Conn) ListUnitsFilteredContext(ctx context.Context, states []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) +} + +// Deprecated: use ListUnitsByPatternsContext instead. +func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { + return c.ListUnitsByPatternsContext(context.Background(), states, patterns) } -// ListUnitsByPatterns returns an array with units. +// ListUnitsByPatternsContext returns an array with units. // It takes a list of units' statuses and names to filter. // Note that units may be known by multiple names at the same time, // and hence there might be more unit names loaded than actual units behind them. 
-func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) +func (c *Conn) ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) } -// ListUnitsByNames returns an array with units. It takes a list of units' -// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns +// Deprecated: use ListUnitsByNamesContext instead. +func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { + return c.ListUnitsByNamesContext(context.Background(), units) +} + +// ListUnitsByNamesContext returns an array with units. It takes a list of units' +// names and returns an UnitStatus array. Comparing to ListUnitsByPatternsContext // method, this method returns statuses even for inactive or non-existing // units. Input array should contain exact unit names, but not patterns. -// Note: Requires systemd v230 or higher -func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) +// +// Requires systemd v230 or higher. +func (c *Conn) ListUnitsByNamesContext(ctx context.Context, units []string) ([]UnitStatus, error) { + return c.listUnitsInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) } type UnitFile struct { @@ -357,25 +524,43 @@ func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { return files, nil } -// ListUnitFiles returns an array of all available units on disk. +// Deprecated: use ListUnitFilesContext instead. 
func (c *Conn) ListUnitFiles() ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) + return c.ListUnitFilesContext(context.Background()) } -// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns. +// ListUnitFiles returns an array of all available units on disk. +func (c *Conn) ListUnitFilesContext(ctx context.Context) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) +} + +// Deprecated: use ListUnitFilesByPatternsContext instead. func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) + return c.ListUnitFilesByPatternsContext(context.Background(), states, patterns) +} + +// ListUnitFilesByPatternsContext returns an array of all available units on disk matched the patterns. +func (c *Conn) ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]UnitFile, error) { + return c.listUnitFilesInternal(c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) } type LinkUnitFileChange EnableUnitFileChange -// LinkUnitFiles() links unit files (that are located outside of the +// Deprecated: use LinkUnitFilesContext instead. +func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { + return c.LinkUnitFilesContext(context.Background(), files, runtime, force) +} + +// LinkUnitFilesContext links unit files (that are located outside of the // usual unit search paths) into the unit search path. // // It takes a list of absolute paths to unit files to link and two -// booleans. The first boolean controls whether the unit shall be +// booleans. 
+// +// The first boolean controls whether the unit shall be // enabled for runtime only (true, /run), or persistently (false, // /etc). +// // The second controls whether symlinks pointing to other units shall // be replaced if necessary. // @@ -383,9 +568,9 @@ type LinkUnitFileChange EnableUnitFileChange // structures with three strings: the type of the change (one of symlink // or unlink), the file name of the symlink and the destination of the // symlink. -func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { +func (c *Conn) LinkUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) if err != nil { return nil, err } @@ -409,8 +594,13 @@ func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUn return changes, nil } -// EnableUnitFiles() may be used to enable one or more units in the system (by -// creating symlinks to them in /etc or /run). +// Deprecated: use EnableUnitFilesContext instead. +func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { + return c.EnableUnitFilesContext(context.Background(), files, runtime, force) +} + +// EnableUnitFilesContext may be used to enable one or more units in the system +// (by creating symlinks to them in /etc or /run). 
// // It takes a list of unit files to enable (either just file names or full // absolute paths if the unit files are residing outside the usual unit @@ -425,11 +615,11 @@ func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUn // structures with three strings: the type of the change (one of symlink // or unlink), the file name of the symlink and the destination of the // symlink. -func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { +func (c *Conn) EnableUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { var carries_install_info bool result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) if err != nil { return false, nil, err } @@ -459,8 +649,13 @@ type EnableUnitFileChange struct { Destination string // Destination of the symlink } -// DisableUnitFiles() may be used to disable one or more units in the system (by -// removing symlinks to them from /etc or /run). +// Deprecated: use DisableUnitFilesContext instead. +func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { + return c.DisableUnitFilesContext(context.Background(), files, runtime) +} + +// DisableUnitFilesContext may be used to disable one or more units in the +// system (by removing symlinks to them from /etc or /run). 
// // It takes a list of unit files to disable (either just file names or full // absolute paths if the unit files are residing outside the usual unit @@ -471,9 +666,9 @@ type EnableUnitFileChange struct { // consists of structures with three strings: the type of the change (one of // symlink or unlink), the file name of the symlink and the destination of the // symlink. -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { +func (c *Conn) DisableUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]DisableUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } @@ -503,18 +698,23 @@ type DisableUnitFileChange struct { Destination string // Destination of the symlink } -// MaskUnitFiles masks one or more units in the system -// -// It takes three arguments: -// * list of units to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) -// * force flag +// Deprecated: use MaskUnitFilesContext instead. func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { + return c.MaskUnitFilesContext(context.Background(), files, runtime, force) +} + +// MaskUnitFilesContext masks one or more units in the system. +// +// The files argument contains a list of units to mask (either just file names +// or full absolute paths if the unit files are residing outside the usual unit +// search paths). 
+// +// The runtime argument is used to specify whether the unit was enabled for +// runtime only (true, /run/systemd/..), or persistently (false, +// /etc/systemd/..). +func (c *Conn) MaskUnitFilesContext(ctx context.Context, files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) if err != nil { return nil, err } @@ -544,17 +744,21 @@ type MaskUnitFileChange struct { Destination string // Destination of the symlink } -// UnmaskUnitFiles unmasks one or more units in the system -// -// It takes two arguments: -// * list of unit files to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) +// Deprecated: use UnmaskUnitFilesContext instead. func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { + return c.UnmaskUnitFilesContext(context.Background(), files, runtime) +} + +// UnmaskUnitFilesContext unmasks one or more units in the system. +// +// It takes the list of unit files to mask (either just file names or full +// absolute paths if the unit files are residing outside the usual unit search +// paths), and a boolean runtime flag to specify whether the unit was enabled +// for runtime only (true, /run/systemd/..), or persistently (false, +// /etc/systemd/..). 
+func (c *Conn) UnmaskUnitFilesContext(ctx context.Context, files []string, runtime bool) ([]UnmaskUnitFileChange, error) { result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) + err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) if err != nil { return nil, err } @@ -584,17 +788,77 @@ type UnmaskUnitFileChange struct { Destination string // Destination of the symlink } -// Reload instructs systemd to scan for and reload unit files. This is -// equivalent to a 'systemctl daemon-reload'. +// Deprecated: use ReloadContext instead. func (c *Conn) Reload() error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() + return c.ReloadContext(context.Background()) +} + +// ReloadContext instructs systemd to scan for and reload unit files. This is +// an equivalent to systemctl daemon-reload. +func (c *Conn) ReloadContext(ctx context.Context) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.Reload", 0).Store() } func unitPath(name string) dbus.ObjectPath { return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) } -// unitName returns the unescaped base element of the supplied escaped path +// unitName returns the unescaped base element of the supplied escaped path. func unitName(dpath dbus.ObjectPath) string { return pathBusUnescape(path.Base(string(dpath))) } + +// JobStatus holds a currently queued job definition. +type JobStatus struct { + Id uint32 // The numeric job id + Unit string // The primary unit name for this job + JobType string // The job type as string + Status string // The job state as string + JobPath dbus.ObjectPath // The job object path + UnitPath dbus.ObjectPath // The unit object path +} + +// Deprecated: use ListJobsContext instead. 
+func (c *Conn) ListJobs() ([]JobStatus, error) { + return c.ListJobsContext(context.Background()) +} + +// ListJobsContext returns an array with all currently queued jobs. +func (c *Conn) ListJobsContext(ctx context.Context) ([]JobStatus, error) { + return c.listJobsInternal(ctx) +} + +func (c *Conn) listJobsInternal(ctx context.Context) ([]JobStatus, error) { + result := make([][]interface{}, 0) + if err := c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ListJobs", 0).Store(&result); err != nil { + return nil, err + } + + resultInterface := make([]interface{}, len(result)) + for i := range result { + resultInterface[i] = result[i] + } + + status := make([]JobStatus, len(result)) + statusInterface := make([]interface{}, len(status)) + for i := range status { + statusInterface[i] = &status[i] + } + + if err := dbus.Store(resultInterface, statusInterface...); err != nil { + return nil, err + } + + return status, nil +} + +// Freeze the cgroup associated with the unit. +// Note that FreezeUnit and ThawUnit are only supported on systems running with cgroup v2. +func (c *Conn) FreezeUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.FreezeUnit", 0, unit).Store() +} + +// Unfreeze the cgroup associated with the unit. 
+func (c *Conn) ThawUnit(ctx context.Context, unit string) error { + return c.sysobj.CallWithContext(ctx, "org.freedesktop.systemd1.Manager.ThawUnit", 0, unit).Store() +} diff --git a/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal.go index a0f4837a..ac24c776 100644 --- a/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal.go @@ -23,20 +23,7 @@ package journal import ( - "bytes" - "encoding/binary" - "errors" "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "sync" - "sync/atomic" - "syscall" - "unsafe" ) // Priority of a journal message @@ -53,173 +40,7 @@ const ( PriDebug ) -var ( - // This can be overridden at build-time: - // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable - journalSocket = "/run/systemd/journal/socket" - - // unixConnPtr atomically holds the local unconnected Unix-domain socket. - // Concrete safe pointer type: *net.UnixConn - unixConnPtr unsafe.Pointer - // onceConn ensures that unixConnPtr is initialized exactly once. - onceConn sync.Once -) - -func init() { - onceConn.Do(initConn) -} - -// Enabled checks whether the local systemd journal is available for logging. -func Enabled() bool { - onceConn.Do(initConn) - - if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { - return false - } - - if _, err := net.Dial("unixgram", journalSocket); err != nil { - return false - } - - return true -} - -// Send a message to the local systemd journal. vars is a map of journald -// fields to values. Fields must be composed of uppercase letters, numbers, -// and underscores, but must not start with an underscore. Within these -// restrictions, any arbitrary field name may be used. 
Some names have special -// significance: see the journalctl documentation -// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) -// for more details. vars may be nil. -func Send(message string, priority Priority, vars map[string]string) error { - conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) - if conn == nil { - return errors.New("could not initialize socket to journald") - } - - socketAddr := &net.UnixAddr{ - Name: journalSocket, - Net: "unixgram", - } - - data := new(bytes.Buffer) - appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) - appendVariable(data, "MESSAGE", message) - for k, v := range vars { - appendVariable(data, k, v) - } - - _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) - if err == nil { - return nil - } - if !isSocketSpaceError(err) { - return err - } - - // Large log entry, send it via tempfile and ancillary-fd. - file, err := tempFd() - if err != nil { - return err - } - defer file.Close() - _, err = io.Copy(file, data) - if err != nil { - return err - } - rights := syscall.UnixRights(int(file.Fd())) - _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) - if err != nil { - return err - } - - return nil -} - // Print prints a message to the local systemd journal using Send(). 
func Print(priority Priority, format string, a ...interface{}) error { return Send(fmt.Sprintf(format, a...), priority, nil) } - -func appendVariable(w io.Writer, name, value string) { - if err := validVarName(name); err != nil { - fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) - } - if strings.ContainsRune(value, '\n') { - /* When the value contains a newline, we write: - * - the variable name, followed by a newline - * - the size (in 64bit little endian format) - * - the data, followed by a newline - */ - fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) - fmt.Fprintln(w, value) - } else { - /* just write the variable and value all on one line */ - fmt.Fprintf(w, "%s=%s\n", name, value) - } -} - -// validVarName validates a variable name to make sure journald will accept it. -// The variable name must be in uppercase and consist only of characters, -// numbers and underscores, and may not begin with an underscore: -// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html -func validVarName(name string) error { - if name == "" { - return errors.New("Empty variable name") - } else if name[0] == '_' { - return errors.New("Variable name begins with an underscore") - } - - for _, c := range name { - if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { - return errors.New("Variable name contains invalid characters") - } - } - return nil -} - -// isSocketSpaceError checks whether the error is signaling -// an "overlarge message" condition. -func isSocketSpaceError(err error) bool { - opErr, ok := err.(*net.OpError) - if !ok || opErr == nil { - return false - } - - sysErr, ok := opErr.Err.(*os.SyscallError) - if !ok || sysErr == nil { - return false - } - - return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS -} - -// tempFd creates a temporary, unlinked file under `/dev/shm`. 
-func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") - if err != nil { - return nil, err - } - err = syscall.Unlink(file.Name()) - if err != nil { - return nil, err - } - return file, nil -} - -// initConn initializes the global `unixConnPtr` socket. -// It is meant to be called exactly once, at program startup. -func initConn() { - autobind, err := net.ResolveUnixAddr("unixgram", "") - if err != nil { - return - } - - sock, err := net.ListenUnixgram("unixgram", autobind) - if err != nil { - return - } - - atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) -} diff --git a/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go new file mode 100644 index 00000000..439ad287 --- /dev/null +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -0,0 +1,215 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. 
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket. + // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + if c := getOrInitConn(); c == nil { + return false + } + + conn, err := net.Dial("unixgram", journalSocket) + if err != nil { + return false + } + defer conn.Close() + + return true +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. 
+func Send(message string, priority Priority, vars map[string]string) error { + conn := getOrInitConn() + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary +func getOrInitConn() *net.UnixConn { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn != nil { + return conn + } + onceConn.Do(initConn) + return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name 
to make sure journald will accept it. +// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is automatically called when needed. 
+func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/mantle/sdk/version.go b/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go similarity index 52% rename from mantle/sdk/version.go rename to mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go index 58f38aa5..677aca68 100644 --- a/mantle/sdk/version.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go @@ -12,20 +12,24 @@ // See the License for the specific language governing permissions and // limitations under the License. -package sdk +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal a C API. 
+// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal import ( - "fmt" + "errors" ) -type Versions struct { - Version string - VersionID string - BuildID string - SDKVersion string +func Enabled() bool { + return false } -func VersionsFromDir(dir string) (ver Versions, err error) { - err = fmt.Errorf("This API is deprecated") - return +func Send(message string, priority Priority, vars map[string]string) error { + return errors.New("could not initialize socket to journald") } diff --git a/mantle/vendor/github.com/coreos/go-systemd/v22/unit/deserialize.go b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/deserialize.go index c0c06bdf..283c1507 100644 --- a/mantle/vendor/github.com/coreos/go-systemd/v22/unit/deserialize.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/deserialize.go @@ -43,37 +43,99 @@ var ( ErrLineTooLong = fmt.Errorf("line too long (max %d bytes)", SYSTEMD_LINE_MAX) ) -// Deserialize parses a systemd unit file into a list of UnitOption objects. +// DeserializeOptions parses a systemd unit file into a list of UnitOptions +func DeserializeOptions(f io.Reader) (opts []*UnitOption, err error) { + _, options, err := deserializeAll(f) + return options, err +} + +// DeserializeSections deserializes into a list of UnitSections. +func DeserializeSections(f io.Reader) ([]*UnitSection, error) { + sections, _, err := deserializeAll(f) + return sections, err +} + +// Deserialize parses a systemd unit file into a list of UnitOptions. +// Note: this function is deprecated in favor of DeserializeOptions +// and will be removed at a future date. func Deserialize(f io.Reader) (opts []*UnitOption, err error) { - lexer, optchan, errchan := newLexer(f) + return DeserializeOptions(f) +} + +type lexDataType int + +const ( + sectionKind lexDataType = iota + optionKind +) + +// lexChanData - support either datatype in the lex channel. +// Poor man's union data type. 
+type lexData struct { + Type lexDataType + Option *UnitOption + Section *UnitSection +} + +// deserializeAll deserializes into UnitSections and UnitOptions. +func deserializeAll(f io.Reader) ([]*UnitSection, []*UnitOption, error) { + + lexer, lexchan, errchan := newLexer(f) + go lexer.lex() - for opt := range optchan { - opts = append(opts, &(*opt)) + sections := []*UnitSection{} + options := []*UnitOption{} + + for ld := range lexchan { + switch ld.Type { + case optionKind: + if ld.Option != nil { + // add to options + opt := ld.Option + options = append(options, &(*opt)) + + // sanity check. "should not happen" as sectionKind is first in code flow. + if len(sections) == 0 { + return nil, nil, fmt.Errorf( + "Unit file misparse: option before section") + } + + // add to newest section entries. + s := len(sections) - 1 + sections[s].Entries = append(sections[s].Entries, + &UnitEntry{Name: opt.Name, Value: opt.Value}) + } + case sectionKind: + if ld.Section != nil { + sections = append(sections, ld.Section) + } + } } - err = <-errchan - return opts, err + err := <-errchan + + return sections, options, err } -func newLexer(f io.Reader) (*lexer, <-chan *UnitOption, <-chan error) { - optchan := make(chan *UnitOption) +func newLexer(f io.Reader) (*lexer, <-chan *lexData, <-chan error) { + lexchan := make(chan *lexData) errchan := make(chan error, 1) buf := bufio.NewReader(f) - return &lexer{buf, optchan, errchan, ""}, optchan, errchan + return &lexer{buf, lexchan, errchan, ""}, lexchan, errchan } type lexer struct { buf *bufio.Reader - optchan chan *UnitOption + lexchan chan *lexData errchan chan error section string } func (l *lexer) lex() { defer func() { - close(l.optchan) + close(l.lexchan) close(l.errchan) }() next := l.lexNextSection @@ -123,7 +185,13 @@ func (l *lexer) lexSectionSuffixFunc(section string) lexStep { garbage = bytes.TrimSpace(garbage) if len(garbage) > 0 { - return nil, fmt.Errorf("found garbage after section name %s: %v", l.section, garbage) + 
return nil, fmt.Errorf("found garbage after section name %s: %q", l.section, garbage) + } + + l.lexchan <- &lexData{ + Type: sectionKind, + Section: &UnitSection{Section: section, Entries: []*UnitEntry{}}, + Option: nil, } return l.lexNextSectionOrOptionFunc(section), nil @@ -252,7 +320,11 @@ func (l *lexer) lexOptionValueFunc(section, name string, partial bytes.Buffer) l } else { val = strings.TrimSpace(val) } - l.optchan <- &UnitOption{Section: section, Name: name, Value: val} + l.lexchan <- &lexData{ + Type: optionKind, + Section: nil, + Option: &UnitOption{Section: section, Name: name, Value: val}, + } return l.lexNextSectionOrOptionFunc(section), nil } diff --git a/mantle/vendor/github.com/coreos/go-systemd/v22/unit/escape.go b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/escape.go index 63b11726..98e2044a 100644 --- a/mantle/vendor/github.com/coreos/go-systemd/v22/unit/escape.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/escape.go @@ -27,14 +27,15 @@ const ( ) // If isPath is true: -// We remove redundant '/'s, the leading '/', and trailing '/'. -// If the result is empty, a '/' is inserted. +// +// We remove redundant '/'s, the leading '/', and trailing '/'. +// If the result is empty, a '/' is inserted. // // We always: -// Replace the following characters with `\x%x`: -// Leading `.` -// `-`, `\`, and anything not in this set: `:-_.\[0-9a-zA-Z]` -// Replace '/' with '-'. +// +// Replace the following characters with `\x%x`: Leading `.`, +// `-`, `\`, and anything not in this set: `:-_.\[0-9a-zA-Z]` +// Replace '/' with '-'. func escape(unescaped string, isPath bool) string { e := []byte{} inSlashes := false @@ -69,11 +70,13 @@ func escape(unescaped string, isPath bool) string { } // If isPath is true: -// We always return a string beginning with '/'. +// +// We always return a string beginning with '/'. // // We always: -// Replace '-' with '/'. -// Replace `\x%x` with the value represented in hex. +// +// Replace '-' with '/'. 
+// Replace `\x%x` with the value represented in hex. func unescape(escaped string, isPath bool) string { u := []byte{} for i := 0; i < len(escaped); i++ { diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid_other.go b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/section.go similarity index 45% rename from mantle/vendor/github.com/minio/sha256-simd/cpuid_other.go rename to mantle/vendor/github.com/coreos/go-systemd/v22/unit/section.go index 3e441582..217a183c 100644 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid_other.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/section.go @@ -1,4 +1,4 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. +// Copyright 2020 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,24 +11,34 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// - -// +build !386,!amd64,!arm,!arm64 arm64,!linux -package sha256 +package unit -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 +// UnitEntry is a single line entry in a Unit file. +type UnitEntry struct { + Name string + Value string } -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 +// UnitSection is a section in a Unit file. The section name +// and a list of entries in that section. 
+type UnitSection struct { + Section string + Entries []*UnitEntry } -func xgetbv(index uint32) (eax, edx uint32) { - return 0, 0 +// String implements the stringify interface for UnitEntry +func (u *UnitEntry) String() string { + return "{Name: " + u.Name + ", " + "Value: " + u.Value + "}" } -func haveArmSha() bool { - return false +// String implements the stringify interface for UnitSection +func (s *UnitSection) String() string { + result := "{Section: " + s.Section + for _, e := range s.Entries { + result += e.String() + } + + result += "}" + return result } diff --git a/mantle/vendor/github.com/coreos/go-systemd/v22/unit/serialize.go b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/serialize.go index e07799ca..c1b79c02 100644 --- a/mantle/vendor/github.com/coreos/go-systemd/v22/unit/serialize.go +++ b/mantle/vendor/github.com/coreos/go-systemd/v22/unit/serialize.go @@ -58,6 +58,29 @@ func Serialize(opts []*UnitOption) io.Reader { return &buf } +// SerializeSections will serializes the unit file from the given +// UnitSections. 
+func SerializeSections(sections []*UnitSection) io.Reader { + + var buf bytes.Buffer + + for i, s := range sections { + writeSectionHeader(&buf, s.Section) + writeNewline(&buf) + + for _, e := range s.Entries { + writeOption(&buf, &UnitOption{s.Section, e.Name, e.Value}) + writeNewline(&buf) + } + + if i < len(sections)-1 { + writeNewline(&buf) + } + } + + return &buf +} + func writeNewline(buf *bytes.Buffer) { buf.WriteRune('\n') } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go b/mantle/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go index 7761280d..492fd7e6 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go @@ -62,6 +62,7 @@ var ( ErrClevisCustomWithOthers = errors.New("cannot use custom clevis config with tpm2, tang, or threshold") ErrTangThumbprintRequired = errors.New("thumbprint is required") ErrFileIllegalMode = errors.New("illegal file mode") + ErrModeSpecialBits = errors.New("setuid/setgid/sticky bits are not supported in spec versions older than 3.4.0") ErrBothIDAndNameSet = errors.New("cannot set both id and name") ErrLabelTooLong = errors.New("partition labels may not exceed 36 characters") ErrDoesntMatchGUIDRegex = errors.New("doesn't match the form \"01234567-89AB-CDEF-EDCB-A98765432101\"") @@ -99,6 +100,7 @@ var ( ErrEngineConfiguration = errors.New("engine incorrectly configured") // AWS S3 specific errors + ErrInvalidS3ARN = errors.New("invalid S3 ARN format") ErrInvalidS3ObjectVersionId = errors.New("invalid S3 object VersionId") // Obsolete errors, left here for ABI compatibility diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/directory.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/directory.go index c1cc2440..0327b022 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/directory.go +++ 
b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/directory.go @@ -22,5 +22,6 @@ import ( func (d Directory) Validate(c path.ContextPath) (r report.Report) { r.Merge(d.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(d.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(d.Mode)) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/file.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/file.go index 26466ece..5fa9ca8b 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/file.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/file.go @@ -25,6 +25,7 @@ import ( func (f File) Validate(c path.ContextPath) (r report.Report) { r.Merge(f.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(f.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(f.Mode)) r.AddOnError(c.Append("overwrite"), f.validateOverwrite()) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/mode.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/mode.go index 7d23eb3e..6021b915 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/mode.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_0/types/mode.go @@ -24,3 +24,13 @@ func validateMode(m *int) error { } return nil } + +func validateModeSpecialBits(m *int) error { + if m != nil { + mode := uint32(*m) + if mode&07000 != 0 { + return errors.ErrModeSpecialBits + } + } + return nil +} diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/directory.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/directory.go index c1cc2440..0327b022 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/directory.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/directory.go @@ -22,5 +22,6 @@ import ( func (d Directory) Validate(c path.ContextPath) (r report.Report) { 
r.Merge(d.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(d.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(d.Mode)) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/file.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/file.go index d30ed3de..97aa84a7 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/file.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/file.go @@ -25,6 +25,7 @@ import ( func (f File) Validate(c path.ContextPath) (r report.Report) { r.Merge(f.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(f.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(f.Mode)) r.AddOnError(c.Append("overwrite"), f.validateOverwrite()) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/mode.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/mode.go index 7d23eb3e..6021b915 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/mode.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_1/types/mode.go @@ -24,3 +24,13 @@ func validateMode(m *int) error { } return nil } + +func validateModeSpecialBits(m *int) error { + if m != nil { + mode := uint32(*m) + if mode&07000 != 0 { + return errors.ErrModeSpecialBits + } + } + return nil +} diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/directory.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/directory.go index f6f06845..b01a6bf9 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/directory.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/directory.go @@ -22,5 +22,6 @@ import ( func (d Directory) Validate(c path.ContextPath) (r report.Report) { r.Merge(d.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(d.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(d.Mode)) return } diff --git 
a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/file.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/file.go index 9b71bb26..4e7566bd 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/file.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/file.go @@ -25,6 +25,7 @@ import ( func (f File) Validate(c path.ContextPath) (r report.Report) { r.Merge(f.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(f.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(f.Mode)) r.AddOnError(c.Append("overwrite"), f.validateOverwrite()) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/mode.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/mode.go index 9eb7573d..ad3e51c2 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/mode.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_2/types/mode.go @@ -24,3 +24,13 @@ func validateMode(m *int) error { } return nil } + +func validateModeSpecialBits(m *int) error { + if m != nil { + mode := uint32(*m) + if mode&07000 != 0 { + return errors.ErrModeSpecialBits + } + } + return nil +} diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go index f6f06845..b01a6bf9 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go @@ -22,5 +22,6 @@ import ( func (d Directory) Validate(c path.ContextPath) (r report.Report) { r.Merge(d.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(d.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(d.Mode)) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go index 9b71bb26..4e7566bd 
100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go @@ -25,6 +25,7 @@ import ( func (f File) Validate(c path.ContextPath) (r report.Report) { r.Merge(f.Node.Validate(c)) r.AddOnError(c.Append("mode"), validateMode(f.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(f.Mode)) r.AddOnError(c.Append("overwrite"), f.validateOverwrite()) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go index 9eb7573d..ad3e51c2 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go @@ -24,3 +24,13 @@ func validateMode(m *int) error { } return nil } + +func validateModeSpecialBits(m *int) error { + if m != nil { + mode := uint32(*m) + if mode&07000 != 0 { + return errors.ErrModeSpecialBits + } + } + return nil +} diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/translate/translate.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/translate/translate.go index 5b39cae9..2539c8f4 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/translate/translate.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/translate/translate.go @@ -16,6 +16,7 @@ package translate import ( "github.com/coreos/ignition/v2/config/translate" + "github.com/coreos/ignition/v2/config/util" old_types "github.com/coreos/ignition/v2/config/v3_3/types" "github.com/coreos/ignition/v2/config/v3_4_experimental/types" ) @@ -27,9 +28,36 @@ func translateIgnition(old old_types.Ignition) (ret types.Ignition) { return } +func translateFileEmbedded1(old old_types.FileEmbedded1) (ret types.FileEmbedded1) { + tr := translate.NewTranslator() + tr.Translate(&old.Append, &ret.Append) + 
tr.Translate(&old.Contents, &ret.Contents) + if old.Mode != nil { + // We support the special mode bits for specs >=3.4.0, so if + // the user provides special mode bits in an Ignition config + // with the version < 3.4.0, then we need to explicitly mask + // those bits out during translation. + ret.Mode = util.IntToPtr(*old.Mode & ^07000) + } + return +} + +func translateDirectoryEmbedded1(old old_types.DirectoryEmbedded1) (ret types.DirectoryEmbedded1) { + if old.Mode != nil { + // We support the special mode bits for specs >=3.4.0, so if + // the user provides special mode bits in an Ignition config + // with the version < 3.4.0, then we need to explicitly mask + // those bits out during translation. + ret.Mode = util.IntToPtr(*old.Mode & ^07000) + } + return +} + func Translate(old old_types.Config) (ret types.Config) { tr := translate.NewTranslator() tr.AddCustomTranslator(translateIgnition) + tr.AddCustomTranslator(translateDirectoryEmbedded1) + tr.AddCustomTranslator(translateFileEmbedded1) tr.Translate(&old, &ret) return } diff --git a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/types/url.go b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/types/url.go index 0d8771bf..3ca189da 100644 --- a/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/types/url.go +++ b/mantle/vendor/github.com/coreos/ignition/v2/config/v3_4_experimental/types/url.go @@ -16,7 +16,9 @@ package types import ( "net/url" + "strings" + "github.com/aws/aws-sdk-go/aws/arn" "github.com/vincent-petithory/dataurl" "github.com/coreos/ignition/v2/config/shared/errors" @@ -39,6 +41,30 @@ func validateURL(s string) error { } } return nil + case "arn": + fullURL := u.Scheme + ":" + u.Opaque + if !arn.IsARN(fullURL) { + return errors.ErrInvalidS3ARN + } + s3arn, err := arn.Parse(fullURL) + if err != nil { + return err + } + if s3arn.Service != "s3" { + return errors.ErrInvalidS3ARN + } + urlSplit := strings.Split(fullURL, "/") + if 
strings.HasPrefix(s3arn.Resource, "accesspoint/") && len(urlSplit) < 3 { + return errors.ErrInvalidS3ARN + } else if len(urlSplit) < 2 { + return errors.ErrInvalidS3ARN + } + if v, ok := u.Query()["versionId"]; ok { + if len(v) == 0 || v[0] == "" { + return errors.ErrInvalidS3ObjectVersionId + } + } + return nil case "data": if _, err := dataurl.DecodeString(s); err != nil { return err diff --git a/mantle/vendor/github.com/coreos/ioprogress/LICENSE b/mantle/vendor/github.com/coreos/ioprogress/LICENSE deleted file mode 100644 index 22985159..00000000 --- a/mantle/vendor/github.com/coreos/ioprogress/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/mantle/vendor/github.com/coreos/ioprogress/README.md b/mantle/vendor/github.com/coreos/ioprogress/README.md deleted file mode 100644 index 3d291e9d..00000000 --- a/mantle/vendor/github.com/coreos/ioprogress/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# ioprogress - -ioprogress is a Go (golang) library with implementations of `io.Reader` -and `io.Writer` that draws progress bars. The primary use case for these -are for CLI applications but alternate progress bar writers can be supplied -for alternate environments. - -## Example - -![Progress](http://g.recordit.co/GO5HxT16QH.gif) - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/ioprogress -``` - -## Usage - -Here is an example of outputting a basic progress bar to the CLI as -we're "downloading" from some other `io.Reader` (perhaps from a network -connection): - -```go -// Imagine this came from some external source, such as a network connection, -// and that we have the full size of it, such as from a Content-Length HTTP -// header. -var r io.Reader - -// Create the progress reader -progressR := &ioprogress.Reader{ - Reader: r, - Size: rSize, -} - -// Copy all of the reader to some local file f. As it copies, the -// progressR will write progress to the terminal on os.Stdout. This is -// customizable. -io.Copy(f, progressR) -``` diff --git a/mantle/vendor/github.com/coreos/ioprogress/draw.go b/mantle/vendor/github.com/coreos/ioprogress/draw.go deleted file mode 100644 index 329dfca0..00000000 --- a/mantle/vendor/github.com/coreos/ioprogress/draw.go +++ /dev/null @@ -1,135 +0,0 @@ -package ioprogress - -import ( - "fmt" - "io" - "os" - "strings" - - "golang.org/x/crypto/ssh/terminal" -) - -// DrawFunc is the callback type for drawing progress. -type DrawFunc func(int64, int64) error - -// DrawTextFormatFunc is a callback used by DrawFuncs that draw text in -// order to format the text into some more human friendly format. 
-type DrawTextFormatFunc func(int64, int64) string - -var defaultDrawFunc DrawFunc - -func init() { - defaultDrawFunc = DrawTerminal(os.Stdout) -} - -// isTerminal returns True when w is going to a tty, and false otherwise. -func isTerminal(w io.Writer) bool { - if f, ok := w.(*os.File); ok { - return terminal.IsTerminal(int(f.Fd())) - } - return false -} - -// DrawTerminal returns a DrawFunc that draws a progress bar to an io.Writer -// that is assumed to be a terminal (and therefore respects carriage returns). -func DrawTerminal(w io.Writer) DrawFunc { - return DrawTerminalf(w, func(progress, total int64) string { - return fmt.Sprintf("%d/%d", progress, total) - }) -} - -// DrawTerminalf returns a DrawFunc that draws a progress bar to an io.Writer -// that is formatted with the given formatting function. -func DrawTerminalf(w io.Writer, f DrawTextFormatFunc) DrawFunc { - var maxLength int - - return func(progress, total int64) error { - if progress == -1 && total == -1 { - _, err := fmt.Fprintf(w, "\n") - return err - } - - // Make sure we pad it to the max length we've ever drawn so that - // we don't have trailing characters. - line := f(progress, total) - if len(line) < maxLength { - line = fmt.Sprintf( - "%s%s", - line, - strings.Repeat(" ", maxLength-len(line))) - } - maxLength = len(line) - - terminate := "\r" - if !isTerminal(w) { - terminate = "\n" - } - _, err := fmt.Fprint(w, line+terminate) - return err - } -} - -var byteUnits = []string{"B", "KB", "MB", "GB", "TB", "PB"} - -// DrawTextFormatBytes is a DrawTextFormatFunc that formats the progress -// and total into human-friendly byte formats. -func DrawTextFormatBytes(progress, total int64) string { - return fmt.Sprintf("%s/%s", ByteUnitStr(progress), ByteUnitStr(total)) -} - -// DrawTextFormatBar returns a DrawTextFormatFunc that draws a progress -// bar with the given width (in characters). 
This can be used in conjunction -// with another DrawTextFormatFunc to create a progress bar with bytes, for -// example: -// -// bar := DrawTextFormatBar(20) -// func(progress, total int64) string { -// return fmt.Sprintf( -// "%s %s", -// bar(progress, total), -// DrawTextFormatBytes(progress, total)) -// } -// -func DrawTextFormatBar(width int64) DrawTextFormatFunc { - return DrawTextFormatBarForW(width, nil) -} - -// DrawTextFormatBarForW returns a DrawTextFormatFunc as described in the docs -// for DrawTextFormatBar, however if the io.Writer passed in is not a tty then -// the returned function will always return "". -func DrawTextFormatBarForW(width int64, w io.Writer) DrawTextFormatFunc { - if w != nil && !isTerminal(w) { - return func(progress, total int64) string { - return "" - } - } - - width -= 2 - - return func(progress, total int64) string { - current := int64((float64(progress) / float64(total)) * float64(width)) - if current < 0 || current > width { - return fmt.Sprintf("[%s]", strings.Repeat(" ", int(width))) - } - return fmt.Sprintf( - "[%s%s]", - strings.Repeat("=", int(current)), - strings.Repeat(" ", int(width-current))) - } -} - -// ByteUnitStr pretty prints a number of bytes. -func ByteUnitStr(n int64) string { - var unit string - size := float64(n) - for i := 1; i < len(byteUnits); i++ { - if size < 1000 { - unit = byteUnits[i-1] - break - } - - size = size / 1000 - } - - return fmt.Sprintf("%.3g %s", size, unit) -} diff --git a/mantle/vendor/github.com/coreos/ioprogress/reader.go b/mantle/vendor/github.com/coreos/ioprogress/reader.go deleted file mode 100644 index 7d52731e..00000000 --- a/mantle/vendor/github.com/coreos/ioprogress/reader.go +++ /dev/null @@ -1,107 +0,0 @@ -package ioprogress - -import ( - "io" - "time" -) - -// Reader is an implementation of io.Reader that draws the progress of -// reading some data. 
-type Reader struct { - // Reader is the underlying reader to read from - Reader io.Reader - - // Size is the total size of the data coming out of the reader. - Size int64 - - // DrawFunc is the callback to invoke to draw the progress bar. By - // default, this will be DrawTerminal(os.Stdout). - // - // DrawInterval is the minimum time to wait between reads to update the - // progress bar. - DrawFunc DrawFunc - DrawInterval time.Duration - - progress int64 - lastDraw time.Time -} - -// Read reads from the underlying reader and invokes the DrawFunc if -// appropriate. The DrawFunc is executed when there is data that is -// read (progress is made) and at least DrawInterval time has passed. -func (r *Reader) Read(p []byte) (int, error) { - // If we haven't drawn before, initialize the progress bar - if r.lastDraw.IsZero() { - r.initProgress() - } - - // Read from the underlying source - n, err := r.Reader.Read(p) - - // Always increment the progress even if there was an error - r.progress += int64(n) - - // If we don't have any errors, then draw the progress. If we are - // at the end of the data, then finish the progress. - if err == nil { - // Only draw if we read data or we've never read data before (to - // initialize the progress bar). - if n > 0 { - r.drawProgress() - } - } - if err == io.EOF { - r.finishProgress() - } - - return n, err -} - -func (r *Reader) drawProgress() { - // If we've drawn before, then make sure that the draw interval - // has passed before we draw again. 
- interval := r.DrawInterval - if interval == 0 { - interval = time.Second - } - if !r.lastDraw.IsZero() { - nextDraw := r.lastDraw.Add(interval) - if time.Now().Before(nextDraw) { - return - } - } - - // Draw - f := r.drawFunc() - f(r.progress, r.Size) - - // Record this draw so that we don't draw again really quickly - r.lastDraw = time.Now() -} - -func (r *Reader) finishProgress() { - f := r.drawFunc() - f(r.progress, r.Size) - - // Print a newline - f(-1, -1) - - // Reset lastDraw so we don't finish again - var zeroDraw time.Time - r.lastDraw = zeroDraw -} - -func (r *Reader) initProgress() { - var zeroDraw time.Time - r.lastDraw = zeroDraw - r.drawProgress() - r.lastDraw = zeroDraw -} - -func (r *Reader) drawFunc() DrawFunc { - if r.DrawFunc == nil { - return defaultDrawFunc - } - - return r.DrawFunc -} diff --git a/mantle/vendor/github.com/coreos/stream-metadata-go/fedoracoreos/fcos.go b/mantle/vendor/github.com/coreos/stream-metadata-go/fedoracoreos/fcos.go index 866fbc26..9fc2b3e2 100644 --- a/mantle/vendor/github.com/coreos/stream-metadata-go/fedoracoreos/fcos.go +++ b/mantle/vendor/github.com/coreos/stream-metadata-go/fedoracoreos/fcos.go @@ -6,7 +6,7 @@ package fedoracoreos import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" @@ -35,7 +35,7 @@ func getStream(u url.URL) (*stream.Stream, error) { if err != nil { return nil, err } - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) resp.Body.Close() if err != nil { return nil, err diff --git a/mantle/vendor/github.com/coreos/stream-metadata-go/release/release.go b/mantle/vendor/github.com/coreos/stream-metadata-go/release/release.go index f3100b14..16d7d8fc 100644 --- a/mantle/vendor/github.com/coreos/stream-metadata-go/release/release.go +++ b/mantle/vendor/github.com/coreos/stream-metadata-go/release/release.go @@ -60,11 +60,13 @@ type Media struct { Exoscale *PlatformBase `json:"exoscale"` Gcp *PlatformGcp `json:"gcp"` Ibmcloud *PlatformIBMCloud 
`json:"ibmcloud"` + KubeVirt *PlatformKubeVirt `json:"kubevirt"` Metal *PlatformBase `json:"metal"` Nutanix *PlatformBase `json:"nutanix"` Openstack *PlatformBase `json:"openstack"` PowerVS *PlatformIBMCloud `json:"powervs"` Qemu *PlatformBase `json:"qemu"` + VirtualBox *PlatformBase `json:"virtualbox"` Vmware *PlatformBase `json:"vmware"` Vultr *PlatformBase `json:"vultr"` } @@ -98,6 +100,12 @@ type PlatformIBMCloud struct { Images map[string]IBMCloudImage `json:"images"` } +// PlatformKubeVirt containerDisk metadata +type PlatformKubeVirt struct { + PlatformBase + Image *ContainerImage `json:"image"` +} + // ImageFormat contains all artifacts for a single OS image type ImageFormat struct { Disk *Artifact `json:"disk,omitempty"` @@ -119,6 +127,13 @@ type CloudImage struct { Image string `json:"image"` } +// ContainerImage represents a tagged container image +type ContainerImage struct { + // Preferred way to reference the image, which might be by tag or digest + Image string `json:"image"` + DigestRef string `json:"digest-ref"` +} + // GcpImage represents a GCP cloud image type GcpImage struct { Project string `json:"project"` diff --git a/mantle/vendor/github.com/coreos/stream-metadata-go/release/translate.go b/mantle/vendor/github.com/coreos/stream-metadata-go/release/translate.go index 211fc84b..27f4fe97 100644 --- a/mantle/vendor/github.com/coreos/stream-metadata-go/release/translate.go +++ b/mantle/vendor/github.com/coreos/stream-metadata-go/release/translate.go @@ -46,12 +46,12 @@ func (releaseArch *Arch) toStreamArch(rel *Release) stream.Arch { Formats: mapFormats(releaseArch.Media.Aliyun.Artifacts), } aliyunImages := stream.ReplicatedImage{ - Regions: make(map[string]stream.RegionImage), + Regions: make(map[string]stream.SingleImage), } if releaseArch.Media.Aliyun.Images != nil { for region, image := range releaseArch.Media.Aliyun.Images { - ri := stream.RegionImage{Release: rel.Release, Image: image.Image} - aliyunImages.Regions[region] = ri + si := 
stream.SingleImage{Release: rel.Release, Image: image.Image} + aliyunImages.Regions[region] = si } cloudImages.Aliyun = &aliyunImages @@ -63,13 +63,13 @@ func (releaseArch *Arch) toStreamArch(rel *Release) stream.Arch { Release: rel.Release, Formats: mapFormats(releaseArch.Media.Aws.Artifacts), } - awsAmis := stream.AwsImage{ - Regions: make(map[string]stream.AwsRegionImage), + awsAmis := stream.ReplicatedImage{ + Regions: make(map[string]stream.SingleImage), } if releaseArch.Media.Aws.Images != nil { for region, ami := range releaseArch.Media.Aws.Images { - ri := stream.AwsRegionImage{Release: rel.Release, Image: ami.Image} - awsAmis.Regions[region] = ri + si := stream.SingleImage{Release: rel.Release, Image: ami.Image} + awsAmis.Regions[region] = si } cloudImages.Aws = &awsAmis @@ -132,6 +132,20 @@ func (releaseArch *Arch) toStreamArch(rel *Release) stream.Arch { } } + if releaseArch.Media.KubeVirt != nil { + artifacts["kubevirt"] = stream.PlatformArtifacts{ + Release: rel.Release, + Formats: mapFormats(releaseArch.Media.KubeVirt.Artifacts), + } + if releaseArch.Media.KubeVirt.Image != nil { + cloudImages.KubeVirt = &stream.ContainerImage{ + Release: rel.Release, + Image: releaseArch.Media.KubeVirt.Image.Image, + DigestRef: releaseArch.Media.KubeVirt.Image.DigestRef, + } + } + } + if releaseArch.Media.Digitalocean != nil { artifacts["digitalocean"] = stream.PlatformArtifacts{ Release: rel.Release, @@ -151,17 +165,17 @@ func (releaseArch *Arch) toStreamArch(rel *Release) stream.Arch { Formats: mapFormats(releaseArch.Media.Ibmcloud.Artifacts), } ibmcloudObjects := stream.ReplicatedObject{ - Regions: make(map[string]stream.RegionObject), + Regions: make(map[string]stream.SingleObject), } if releaseArch.Media.Ibmcloud.Images != nil { for region, object := range releaseArch.Media.Ibmcloud.Images { - ri := stream.RegionObject{ + so := stream.SingleObject{ Release: rel.Release, Object: object.Object, Bucket: object.Bucket, Url: object.Url, } - 
ibmcloudObjects.Regions[region] = ri + ibmcloudObjects.Regions[region] = so } cloudImages.Ibmcloud = &ibmcloudObjects @@ -199,17 +213,17 @@ func (releaseArch *Arch) toStreamArch(rel *Release) stream.Arch { Formats: mapFormats(releaseArch.Media.PowerVS.Artifacts), } powervsObjects := stream.ReplicatedObject{ - Regions: make(map[string]stream.RegionObject), + Regions: make(map[string]stream.SingleObject), } if releaseArch.Media.PowerVS.Images != nil { for region, object := range releaseArch.Media.PowerVS.Images { - ri := stream.RegionObject{ + so := stream.SingleObject{ Release: rel.Release, Object: object.Object, Bucket: object.Bucket, Url: object.Url, } - powervsObjects.Regions[region] = ri + powervsObjects.Regions[region] = so } cloudImages.PowerVS = &powervsObjects @@ -223,13 +237,12 @@ func (releaseArch *Arch) toStreamArch(rel *Release) stream.Arch { } } - // if releaseArch.Media.Virtualbox != nil { - // virtualbox := StreamMediaDetails{ - // Release: rel.Release, - // Formats: releaseArch.Media.Virtualbox.Artifacts, - // } - // artifacts.Virtualbox = &virtualbox - // } + if releaseArch.Media.VirtualBox != nil { + artifacts["virtualbox"] = stream.PlatformArtifacts{ + Release: rel.Release, + Formats: mapFormats(releaseArch.Media.VirtualBox.Artifacts), + } + } if releaseArch.Media.Vmware != nil { artifacts["vmware"] = stream.PlatformArtifacts{ diff --git a/mantle/vendor/github.com/coreos/stream-metadata-go/stream/artifact_utils.go b/mantle/vendor/github.com/coreos/stream-metadata-go/stream/artifact_utils.go index e23886f0..68ae6476 100644 --- a/mantle/vendor/github.com/coreos/stream-metadata-go/stream/artifact_utils.go +++ b/mantle/vendor/github.com/coreos/stream-metadata-go/stream/artifact_utils.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -42,8 +41,8 @@ func (a *Artifact) Fetch(w io.Writer) error { return nil } -/// Name returns the "basename" of the artifact, i.e. the contents -/// after the last `/`. 
This can be useful when downloading to a file. +// Name returns the "basename" of the artifact, i.e. the contents +// after the last `/`. This can be useful when downloading to a file. func (a *Artifact) Name() (string, error) { loc, err := url.Parse(a.Location) if err != nil { @@ -53,17 +52,17 @@ func (a *Artifact) Name() (string, error) { return path.Base(loc.Path), nil } -/// Download fetches the specified artifact and saves it to the target -/// directory. The full file path will be returned as a string. -/// If the target file path exists, it will be overwritten. -/// If the download fails, the temporary file will be deleted. +// Download fetches the specified artifact and saves it to the target +// directory. The full file path will be returned as a string. +// If the target file path exists, it will be overwritten. +// If the download fails, the temporary file will be deleted. func (a *Artifact) Download(destdir string) (string, error) { name, err := a.Name() if err != nil { return "", err } destfile := filepath.Join(destdir, name) - w, err := ioutil.TempFile(destdir, ".coreos-artifact-") + w, err := os.CreateTemp(destdir, ".coreos-artifact-") if err != nil { return "", err } diff --git a/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream.go b/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream.go index 2001c661..1ed9b1fd 100644 --- a/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream.go +++ b/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream.go @@ -56,25 +56,38 @@ type Images struct { Aws *AwsImage `json:"aws,omitempty"` Gcp *GcpImage `json:"gcp,omitempty"` Ibmcloud *ReplicatedObject `json:"ibmcloud,omitempty"` + KubeVirt *ContainerImage `json:"kubevirt,omitempty"` PowerVS *ReplicatedObject `json:"powervs,omitempty"` } // ReplicatedImage represents an image in all regions of an AWS-like cloud type ReplicatedImage struct { - Regions map[string]RegionImage `json:"regions,omitempty"` + Regions 
map[string]SingleImage `json:"regions,omitempty"` } -// RegionImage represents an image in a single region of an AWS-like cloud -type RegionImage struct { +// SingleImage represents a globally-accessible image or an image in a +// single region of an AWS-like cloud +type SingleImage struct { Release string `json:"release"` Image string `json:"image"` } +// ContainerImage represents a tagged container image +type ContainerImage struct { + Release string `json:"release"` + // Preferred way to reference the image, which might be by tag or digest + Image string `json:"image"` + DigestRef string `json:"digest-ref"` +} + // AwsImage is a typedef for backwards compatibility. type AwsImage = ReplicatedImage // AwsRegionImage is a typedef for backwards compatibility. -type AwsRegionImage = RegionImage +type AwsRegionImage = SingleImage + +// RegionImage is a typedef for backwards compatibility. +type RegionImage = SingleImage // GcpImage represents a GCP cloud image type GcpImage struct { @@ -84,15 +97,20 @@ type GcpImage struct { Name string `json:"name"` } -// ReplicatedObject represents an object in all regions of an IBMCloud-like cloud +// ReplicatedObject represents an object in all regions of an IBMCloud-like +// cloud type ReplicatedObject struct { - Regions map[string]RegionObject `json:"regions,omitempty"` + Regions map[string]SingleObject `json:"regions,omitempty"` } -// RegionObject represents an IBMCloud/PowerVS cloud image -type RegionObject struct { +// SingleObject represents a globally-accessible cloud storage object, or +// an object in a single region of an IBMCloud-like cloud +type SingleObject struct { Release string `json:"release"` Object string `json:"object"` Bucket string `json:"bucket"` Url string `json:"url"` } + +// RegionObject is a typedef for backwards compatibility. 
+type RegionObject = SingleObject diff --git a/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream_utils.go b/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream_utils.go index 081572c7..b1ca9556 100644 --- a/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream_utils.go +++ b/mantle/vendor/github.com/coreos/stream-metadata-go/stream/stream_utils.go @@ -19,7 +19,7 @@ func (st *Stream) GetArchitecture(archname string) (*Arch, error) { // GetAliyunRegionImage returns the release data (Image ID and release ID) for a particular // architecture and region. -func (st *Stream) GetAliyunRegionImage(archname, region string) (*RegionImage, error) { +func (st *Stream) GetAliyunRegionImage(archname, region string) (*SingleImage, error) { starch, err := st.GetArchitecture(archname) if err != nil { return nil, err @@ -28,7 +28,7 @@ func (st *Stream) GetAliyunRegionImage(archname, region string) (*RegionImage, e if aliyunimages == nil { return nil, fmt.Errorf("%s: No Aliyun images", st.FormatPrefix(archname)) } - var regionVal RegionImage + var regionVal SingleImage var ok bool if regionVal, ok = aliyunimages.Regions[region]; !ok { return nil, fmt.Errorf("%s: No Aliyun images in region %s", st.FormatPrefix(archname), region) @@ -48,7 +48,7 @@ func (st *Stream) GetAliyunImage(archname, region string) (string, error) { // GetAwsRegionImage returns the release data (AMI and release ID) for a particular // architecture and region. 
-func (st *Stream) GetAwsRegionImage(archname, region string) (*AwsRegionImage, error) { +func (st *Stream) GetAwsRegionImage(archname, region string) (*SingleImage, error) { starch, err := st.GetArchitecture(archname) if err != nil { return nil, err @@ -57,7 +57,7 @@ func (st *Stream) GetAwsRegionImage(archname, region string) (*AwsRegionImage, e if awsimages == nil { return nil, fmt.Errorf("%s: No AWS images", st.FormatPrefix(archname)) } - var regionVal AwsRegionImage + var regionVal SingleImage var ok bool if regionVal, ok = awsimages.Regions[region]; !ok { return nil, fmt.Errorf("%s: No AWS images in region %s", st.FormatPrefix(archname), region) diff --git a/mantle/vendor/github.com/digitalocean/go-libvirt/go.mod b/mantle/vendor/github.com/digitalocean/go-libvirt/go.mod deleted file mode 100644 index aaee4db9..00000000 --- a/mantle/vendor/github.com/digitalocean/go-libvirt/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/digitalocean/go-libvirt - -go 1.14 - -require golang.org/x/tools v0.0.0-20200228224639-71482053b885 diff --git a/mantle/vendor/github.com/digitalocean/go-libvirt/go.sum b/mantle/vendor/github.com/digitalocean/go-libvirt/go.sum deleted file mode 100644 index fca050c3..00000000 --- a/mantle/vendor/github.com/digitalocean/go-libvirt/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20200228224639-71482053b885 h1:y09Juz/HD0YjGlyEd4bLUWG0s8Yx6iPniPqUGzUxNrU= -golang.org/x/tools v0.0.0-20200228224639-71482053b885/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/mantle/vendor/github.com/digitalocean/godo/go.mod b/mantle/vendor/github.com/digitalocean/godo/go.mod deleted file mode 100644 index a8af42bd..00000000 --- a/mantle/vendor/github.com/digitalocean/godo/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/digitalocean/godo - -go 1.13 - -require ( - github.com/google/go-querystring v1.0.0 - github.com/stretchr/testify v1.3.0 - golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a -) diff --git a/mantle/vendor/github.com/digitalocean/godo/go.sum b/mantle/vendor/github.com/digitalocean/godo/go.sum deleted file mode 100644 index f1bc1572..00000000 --- a/mantle/vendor/github.com/digitalocean/godo/go.sum +++ /dev/null @@ -1,23 +0,0 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-querystring v1.0.0 
h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/mantle/vendor/github.com/dimchansky/utfbom/go.mod b/mantle/vendor/github.com/dimchansky/utfbom/go.mod deleted file mode 100644 index 8f8620af..00000000 --- a/mantle/vendor/github.com/dimchansky/utfbom/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/dimchansky/utfbom diff --git 
a/mantle/vendor/github.com/dustin/go-humanize/.travis.yml b/mantle/vendor/github.com/dustin/go-humanize/.travis.yml deleted file mode 100644 index ba95cdd1..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/mantle/vendor/github.com/dustin/go-humanize/LICENSE b/mantle/vendor/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a9..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - diff --git a/mantle/vendor/github.com/dustin/go-humanize/README.markdown b/mantle/vendor/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 91b4ae56..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,124 +0,0 @@ -# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) - -Just a few functions for helping humanize times and sizes. - -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize`. - -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for -complete documentation. - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83 MB` or `79 MiB` (whichever you prefer). - -Example: - -```go -fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. -``` - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - -```go -fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. -``` - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - -```go -fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. 
-``` - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - -```go -fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. -``` - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - -```go -fmt.Printf("%f", 2.24) // 2.240000 -fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 -fmt.Printf("%f", 2.0) // 2.000000 -fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 -``` - -## SI notation - -Format numbers with [SI notation][sinotation]. - -Example: - -```go -humanize.SI(0.00000000223, "M") // 2.23 nM -``` - -## English-specific functions - -The following functions are in the `humanize/english` subpackage. - -### Plurals - -Simple English pluralization - -```go -english.PluralWord(1, "object", "") // object -english.PluralWord(42, "object", "") // objects -english.PluralWord(2, "bus", "") // buses -english.PluralWord(99, "locus", "loci") // loci - -english.Plural(1, "object", "") // 1 object -english.Plural(42, "object", "") // 42 objects -english.Plural(2, "bus", "") // 2 buses -english.Plural(99, "locus", "loci") // 99 loci -``` - -### Word series - -Format comma-separated words lists with conjuctions: - -```go -english.WordSeries([]string{"foo"}, "and") // foo -english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar -english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz - -english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz -``` - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/mantle/vendor/github.com/dustin/go-humanize/big.go b/mantle/vendor/github.com/dustin/go-humanize/big.go deleted file mode 100644 index f49dc337..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/big.go +++ /dev/null @@ -1,31 +0,0 @@ -package humanize - -import ( - "math/big" -) - -// 
order of magnitude (to a max order) -func oomm(n, b *big.Int, maxmag int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - if mag == maxmag && maxmag >= 0 { - break - } - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} - -// total order of magnitude -// (same as above, but with no upper limit) -func oom(n, b *big.Int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/bigbytes.go b/mantle/vendor/github.com/dustin/go-humanize/bigbytes.go deleted file mode 100644 index 1a2bf617..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/bigbytes.go +++ /dev/null @@ -1,173 +0,0 @@ -package humanize - -import ( - "fmt" - "math/big" - "strings" - "unicode" -) - -var ( - bigIECExp = big.NewInt(1024) - - // BigByte is one byte in bit.Ints - BigByte = big.NewInt(1) - // BigKiByte is 1,024 bytes in bit.Ints - BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) - // BigMiByte is 1,024 k bytes in bit.Ints - BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) - // BigGiByte is 1,024 m bytes in bit.Ints - BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = 
(&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - "": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%d B", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. 
-// -// BigBytes(82854982) -> 83 MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79 MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. -// -// ParseBigBytes("42 MB") -> 42000000, nil -// ParseBigBytes("42 mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' || r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - val := &big.Rat{} - _, err := fmt.Sscanf(num, "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/bytes.go b/mantle/vendor/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index 0b498f48..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,143 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. 
-const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%d B", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83 MB -func Bytes(s uint64) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79 MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42 MB") -> 42000000, nil -// ParseBytes("42 mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - f, err := strconv.ParseFloat(num, 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/comma.go b/mantle/vendor/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index 520ae3e5..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,116 +0,0 @@ -package humanize - -import ( - "bytes" - "math" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - - // Min int64 can't be negated to a usable value, so it has to be special cased. - if v == math.MinInt64 { - return "-9,223,372,036,854,775,808" - } - - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. 
Commaf(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// CommafWithDigits works like the Commaf but limits the resulting -// string to the given number of decimal places. -// -// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 -func CommafWithDigits(f float64, decimals int) string { - return stripTrailingDigits(Commaf(f), decimals) -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. 
-func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:], ",") -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/commaf.go b/mantle/vendor/github.com/dustin/go-humanize/commaf.go deleted file mode 100644 index 620690de..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/commaf.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "bytes" - "math/big" - "strings" -) - -// BigCommaf produces a string form of the given big.Float in base 10 -// with commas after every three orders of magnitude. 
-func BigCommaf(v *big.Float) string { - buf := &bytes.Buffer{} - if v.Sign() < 0 { - buf.Write([]byte{'-'}) - v.Abs(v) - } - - comma := []byte{','} - - parts := strings.Split(v.Text('f', -1), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/ftoa.go b/mantle/vendor/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index 1c62b640..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,46 +0,0 @@ -package humanize - -import ( - "strconv" - "strings" -) - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -func stripTrailingDigits(s string, digits int) string { - if i := strings.Index(s, "."); i >= 0 { - if digits <= 0 { - return s[:i] - } - i++ - if i+digits >= len(s) { - return s - } - return s[:i+digits] - } - return s -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} - -// FtoaWithDigits converts a float to a string but limits the resulting string -// to the given number of decimal places, and no trailing zeros. 
-func FtoaWithDigits(num float64, digits int) string { - return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/humanize.go b/mantle/vendor/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a2c2da31..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. - -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like, "83 MB" or -"79 MiB" (whichever you prefer). -*/ -package humanize diff --git a/mantle/vendor/github.com/dustin/go-humanize/number.go b/mantle/vendor/github.com/dustin/go-humanize/number.go deleted file mode 100644 index dec61865..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := RenderFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.67" -// "#,###." 
=> "12,345" -// "#,###" => "12345,678" -// "#\u202F###,##" => "12 345,68" -// "#.###,###### => 12.345,678900 -// "" (aka default format) => 12,345.67 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer number, FormatInteger(), -// which is convenient for calls within template. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "+Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "." - thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator 
indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.FormatInt(int64(intf), 10) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. -func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/ordinals.go b/mantle/vendor/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a86..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format. 
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/si.go b/mantle/vendor/github.com/dustin/go-humanize/si.go deleted file mode 100644 index ae659e0e..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,123 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([\-0-9.]+)\s?([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. 
ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - mag := math.Abs(input) - exponent := math.Floor(logn(mag, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := mag / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1 M instead of 1000 k - if value == 1000.0 { - exponent += 3 - value = mag / math.Pow(10, exponent) - } - - value = math.Copysign(value, input) - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, "B") -> 1 MB -// e.g. SI(2.2345e-12, "F") -> 2.2345 pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + " " + prefix + unit -} - -// SIWithDigits works like SI but limits the resulting string to the -// given number of decimal places. -// -// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB -// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF -func SIWithDigits(input float64, decimals int, unit string) string { - value, prefix := ComputeSI(input) - return FtoaWithDigits(value, decimals) + " " + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/mantle/vendor/github.com/dustin/go-humanize/times.go b/mantle/vendor/github.com/dustin/go-humanize/times.go deleted file mode 100644 index dd3fbf5e..00000000 --- a/mantle/vendor/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,117 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Day = 24 * time.Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string. -// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -// A RelTimeMagnitude struct contains a relative time point at which -// the relative format of time will switch to a new format string. A -// slice of these in ascending order by their "D" field is passed to -// CustomRelTime to format durations. -// -// The Format field is a string that may contain a "%s" which will be -// replaced with the appropriate signed label (e.g. "ago" or "from -// now") and a "%d" that will be replaced by the quantity. -// -// The DivBy field is the amount of time the time difference must be -// divided by in order to display correctly. -// -// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" -// DivBy should be time.Minute so whatever the duration is will be -// expressed in minutes. 
-type RelTimeMagnitude struct { - D time.Duration - Format string - DivBy time.Duration -} - -var defaultMagnitudes = []RelTimeMagnitude{ - {time.Second, "now", time.Second}, - {2 * time.Second, "1 second %s", 1}, - {time.Minute, "%d seconds %s", time.Second}, - {2 * time.Minute, "1 minute %s", 1}, - {time.Hour, "%d minutes %s", time.Minute}, - {2 * time.Hour, "1 hour %s", 1}, - {Day, "%d hours %s", time.Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are used applied so that -// the label corresponding to the smaller time is applied. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) -} - -// CustomRelTime formats a time into a relative string. -// -// It takes two times two labels and a table of relative time formats. -// In addition to the generic time delta string (e.g. 5 minutes), the -// labels are used applied so that the label corresponding to the -// smaller time is applied. 
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { - lbl := albl - diff := b.Sub(a) - - if a.After(b) { - lbl = blbl - diff = a.Sub(b) - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].D > diff - }) - - if n >= len(magnitudes) { - n = len(magnitudes) - 1 - } - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.Format { - if escaped { - switch ch { - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.DivBy) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.Format, args...) -} diff --git a/mantle/vendor/github.com/godbus/dbus/v5/.travis.yml b/mantle/vendor/github.com/godbus/dbus/v5/.travis.yml deleted file mode 100644 index dd676720..00000000 --- a/mantle/vendor/github.com/godbus/dbus/v5/.travis.yml +++ /dev/null @@ -1,50 +0,0 @@ -dist: bionic -language: go -go_import_path: github.com/godbus/dbus - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -matrix: - fast_finish: true - allow_failures: - - go: tip - -addons: - apt: - packages: - - dbus - - dbus-x11 - -before_install: - - export GO111MODULE=on - -script: - - go test -v -race -mod=readonly ./... # Run all the tests with the race detector enabled - - go vet ./... # go vet is the official Go static analyzer - -jobs: - include: - # The build matrix doesn't cover build stages, so manually expand - # the jobs with anchors - - &multiarch - stage: "Multiarch Test" - go: 1.11.x - env: TARGETS="386 arm arm64 ppc64le" - before_install: - - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - script: - - | - set -e - for target in $TARGETS; do - printf "\e[1mRunning test suite under ${target}.\e[0m\n" - GOARCH="$target" go test -v ./... 
- printf "\n\n" - done - - <<: *multiarch - go: 1.12.x - - <<: *multiarch - go: 1.13.x diff --git a/mantle/vendor/github.com/godbus/dbus/v5/README.markdown b/mantle/vendor/github.com/godbus/dbus/v5/README.markdown index fd296487..1fb2eaca 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/README.markdown +++ b/mantle/vendor/github.com/godbus/dbus/v5/README.markdown @@ -1,4 +1,4 @@ -[![Build Status](https://travis-ci.org/godbus/dbus.svg?branch=master)](https://travis-ci.org/godbus/dbus) +![Build Status](https://github.com/godbus/dbus/workflows/Go/badge.svg) dbus ---- @@ -32,6 +32,8 @@ gives a short overview over the basic usage. #### Projects using godbus - [notify](https://github.com/esiqveland/notify) provides desktop notifications over dbus into a library. - [go-bluetooth](https://github.com/muka/go-bluetooth) provides a bluetooth client over bluez dbus API. +- [playerbm](https://github.com/altdesktop/playerbm) a bookmark utility for media players. +- [iwd](https://github.com/shibumi/iwd) go bindings for the internet wireless daemon "iwd". Please note that the API is considered unstable for now and may change without further notice. diff --git a/mantle/vendor/github.com/godbus/dbus/v5/auth.go b/mantle/vendor/github.com/godbus/dbus/v5/auth.go index 31abac62..283487a0 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/auth.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/auth.go @@ -37,7 +37,7 @@ const ( // Auth defines the behaviour of an authentication mechanism. type Auth interface { - // Return the name of the mechnism, the argument to the first AUTH command + // Return the name of the mechanism, the argument to the first AUTH command // and the next status. 
FirstData() (name, resp []byte, status AuthStatus) diff --git a/mantle/vendor/github.com/godbus/dbus/v5/call.go b/mantle/vendor/github.com/godbus/dbus/v5/call.go index 2cb18901..b06b0635 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/call.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/call.go @@ -24,6 +24,15 @@ type Call struct { // Holds the response once the call is done. Body []interface{} + // ResponseSequence stores the sequence number of the DBus message containing + // the call response (or error). This can be compared to the sequence number + // of other call responses and signals on this connection to determine their + // relative ordering on the underlying DBus connection. + // For errors, ResponseSequence is populated only if the error came from a + // DBusMessage that was received or if there was an error receiving. In case of + // failure to make the call, ResponseSequence will be NoSequence. + ResponseSequence Sequence + // tracks context and canceler ctx context.Context ctxCanceler context.CancelFunc diff --git a/mantle/vendor/github.com/godbus/dbus/v5/conn.go b/mantle/vendor/github.com/godbus/dbus/v5/conn.go index b55bc99c..29fe018a 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/conn.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/conn.go @@ -45,6 +45,7 @@ type Conn struct { serialGen SerialGenerator inInt Interceptor outInt Interceptor + auth []Auth names *nameTracker calls *callTracker @@ -59,7 +60,8 @@ type Conn struct { func SessionBus() (conn *Conn, err error) { sessionBusLck.Lock() defer sessionBusLck.Unlock() - if sessionBus != nil { + if sessionBus != nil && + sessionBus.Connected() { return sessionBus, nil } defer func() { @@ -67,19 +69,7 @@ func SessionBus() (conn *Conn, err error) { sessionBus = conn } }() - conn, err = SessionBusPrivate() - if err != nil { - return - } - if err = conn.Auth(nil); err != nil { - conn.Close() - conn = nil - return - } - if err = conn.Hello(); err != nil { - conn.Close() - conn = nil - } + 
conn, err = ConnectSessionBus() return } @@ -116,7 +106,8 @@ func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Co func SystemBus() (conn *Conn, err error) { systemBusLck.Lock() defer systemBusLck.Unlock() - if systemBus != nil { + if systemBus != nil && + systemBus.Connected() { return systemBus, nil } defer func() { @@ -124,20 +115,42 @@ func SystemBus() (conn *Conn, err error) { systemBus = conn } }() - conn, err = SystemBusPrivate() + conn, err = ConnectSystemBus() + return +} + +// ConnectSessionBus connects to the session bus. +func ConnectSessionBus(opts ...ConnOption) (*Conn, error) { + address, err := getSessionBusAddress() if err != nil { - return + return nil, err } - if err = conn.Auth(nil); err != nil { - conn.Close() - conn = nil - return + return Connect(address, opts...) +} + +// ConnectSystemBus connects to the system bus. +func ConnectSystemBus(opts ...ConnOption) (*Conn, error) { + return Connect(getSystemBusPlatformAddress(), opts...) +} + +// Connect connects to the given address. +// +// Returned connection is ready to use and doesn't require calling +// Auth and Hello methods to make it usable. +func Connect(address string, opts ...ConnOption) (*Conn, error) { + conn, err := Dial(address, opts...) + if err != nil { + return nil, err + } + if err = conn.Auth(conn.auth); err != nil { + _ = conn.Close() + return nil, err } if err = conn.Hello(); err != nil { - conn.Close() - conn = nil + _ = conn.Close() + return nil, err } - return + return conn, nil } // SystemBusPrivate returns a new private connection to the system bus. @@ -197,6 +210,14 @@ func WithSerialGenerator(gen SerialGenerator) ConnOption { } } +// WithAuth sets authentication methods for the auth conversation. +func WithAuth(methods ...Auth) ConnOption { + return func(conn *Conn) error { + conn.auth = methods + return nil + } +} + // Interceptor intercepts incoming and outgoing messages. 
type Interceptor func(msg *Message) @@ -309,6 +330,11 @@ func (conn *Conn) Context() context.Context { return conn.ctx } +// Connected returns whether conn is connected +func (conn *Conn) Connected() bool { + return conn.ctx.Err() == nil +} + // Eavesdrop causes conn to send all incoming messages to the given channel // without further processing. Method replies, errors and signals will not be // sent to the appropriate channels and method calls will not be handled. If nil @@ -342,8 +368,9 @@ func (conn *Conn) Hello() error { } // inWorker runs in an own goroutine, reading incoming messages from the -// transport and dispatching them appropiately. +// transport and dispatching them appropriately. func (conn *Conn) inWorker() { + sequenceGen := newSequenceGenerator() for { msg, err := conn.ReadMessage() if err != nil { @@ -352,7 +379,7 @@ func (conn *Conn) inWorker() { // anything but to shut down all stuff and returns errors to all // pending replies. conn.Close() - conn.calls.finalizeAllWithError(err) + conn.calls.finalizeAllWithError(sequenceGen, err) return } // invalid messages are ignored @@ -381,13 +408,14 @@ func (conn *Conn) inWorker() { if conn.inInt != nil { conn.inInt(msg) } + sequence := sequenceGen.next() switch msg.Type { case TypeError: - conn.serialGen.RetireSerial(conn.calls.handleDBusError(msg)) + conn.serialGen.RetireSerial(conn.calls.handleDBusError(sequence, msg)) case TypeMethodReply: - conn.serialGen.RetireSerial(conn.calls.handleReply(msg)) + conn.serialGen.RetireSerial(conn.calls.handleReply(sequence, msg)) case TypeSignal: - conn.handleSignal(msg) + conn.handleSignal(sequence, msg) case TypeMethodCall: go conn.handleCall(msg) } @@ -395,7 +423,7 @@ func (conn *Conn) inWorker() { } } -func (conn *Conn) handleSignal(msg *Message) { +func (conn *Conn) handleSignal(sequence Sequence, msg *Message) { iface := msg.Headers[FieldInterface].value.(string) member := msg.Headers[FieldMember].value.(string) // as per 
http://dbus.freedesktop.org/doc/dbus-specification.html , @@ -421,10 +449,11 @@ func (conn *Conn) handleSignal(msg *Message) { } } signal := &Signal{ - Sender: sender, - Path: msg.Headers[FieldPath].value.(ObjectPath), - Name: iface + "." + member, - Body: msg.Body, + Sender: sender, + Path: msg.Headers[FieldPath].value.(ObjectPath), + Name: iface + "." + member, + Body: msg.Body, + Sequence: sequence, } conn.signalHandler.DeliverSignal(iface, member, signal) } @@ -442,6 +471,9 @@ func (conn *Conn) Object(dest string, path ObjectPath) BusObject { } func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) { + if msg.serial == 0 { + msg.serial = conn.getSerial() + } if conn.outInt != nil { conn.outInt(msg) } @@ -473,16 +505,16 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { if ctx == nil { panic("nil context") } + if ch == nil { + ch = make(chan *Call, 1) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Conn).Send") + } var call *Call ctx, canceler := context.WithCancel(ctx) msg.serial = conn.getSerial() if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { - if ch == nil { - ch = make(chan *Call, 5) - } else if cap(ch) == 0 { - panic("dbus: unbuffered channel passed to (*Conn).Send") - } call = new(Call) call.Destination, _ = msg.Headers[FieldDestination].value.(string) call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath) @@ -504,7 +536,8 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { }) } else { canceler() - call = &Call{Err: nil} + call = &Call{Err: nil, Done: ch} + ch <- call conn.sendMessageAndIfClosed(msg, func() { call = &Call{Err: ErrClosed} }) @@ -529,7 +562,6 @@ func (conn *Conn) sendError(err error, dest string, serial uint32) { } msg := new(Message) msg.Type = TypeError - msg.serial = conn.getSerial() msg.Headers = make(map[HeaderField]Variant) if dest != "" { msg.Headers[FieldDestination] = MakeVariant(dest) @@ -548,7 +580,6 
@@ func (conn *Conn) sendError(err error, dest string, serial uint32) { func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { msg := new(Message) msg.Type = TypeMethodReply - msg.serial = conn.getSerial() msg.Headers = make(map[HeaderField]Variant) if dest != "" { msg.Headers[FieldDestination] = MakeVariant(dest) @@ -564,8 +595,14 @@ func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { // AddMatchSignal registers the given match rule to receive broadcast // signals based on their contents. func (conn *Conn) AddMatchSignal(options ...MatchOption) error { + return conn.AddMatchSignalContext(context.Background(), options...) +} + +// AddMatchSignalContext acts like AddMatchSignal but takes a context. +func (conn *Conn) AddMatchSignalContext(ctx context.Context, options ...MatchOption) error { options = append([]MatchOption{withMatchType("signal")}, options...) - return conn.busObj.Call( + return conn.busObj.CallWithContext( + ctx, "org.freedesktop.DBus.AddMatch", 0, formatMatchOptions(options), ).Store() @@ -573,8 +610,14 @@ func (conn *Conn) AddMatchSignal(options ...MatchOption) error { // RemoveMatchSignal removes the first rule that matches previously registered with AddMatchSignal. func (conn *Conn) RemoveMatchSignal(options ...MatchOption) error { + return conn.RemoveMatchSignalContext(context.Background(), options...) +} + +// RemoveMatchSignalContext acts like RemoveMatchSignal but takes a context. +func (conn *Conn) RemoveMatchSignalContext(ctx context.Context, options ...MatchOption) error { options = append([]MatchOption{withMatchType("signal")}, options...) - return conn.busObj.Call( + return conn.busObj.CallWithContext( + ctx, "org.freedesktop.DBus.RemoveMatch", 0, formatMatchOptions(options), ).Store() @@ -639,10 +682,11 @@ func (e Error) Error() string { // Signal represents a D-Bus message of type Signal. The name member is given in // "interface.member" notation, e.g. 
org.freedesktop.D-Bus.NameLost. type Signal struct { - Sender string - Path ObjectPath - Name string - Body []interface{} + Sender string + Path ObjectPath + Name string + Body []interface{} + Sequence Sequence } // transport is a D-Bus transport. @@ -825,25 +869,25 @@ func (tracker *callTracker) track(sn uint32, call *Call) { tracker.lck.Unlock() } -func (tracker *callTracker) handleReply(msg *Message) uint32 { +func (tracker *callTracker) handleReply(sequence Sequence, msg *Message) uint32 { serial := msg.Headers[FieldReplySerial].value.(uint32) tracker.lck.RLock() _, ok := tracker.calls[serial] tracker.lck.RUnlock() if ok { - tracker.finalizeWithBody(serial, msg.Body) + tracker.finalizeWithBody(serial, sequence, msg.Body) } return serial } -func (tracker *callTracker) handleDBusError(msg *Message) uint32 { +func (tracker *callTracker) handleDBusError(sequence Sequence, msg *Message) uint32 { serial := msg.Headers[FieldReplySerial].value.(uint32) tracker.lck.RLock() _, ok := tracker.calls[serial] tracker.lck.RUnlock() if ok { name, _ := msg.Headers[FieldErrorName].value.(string) - tracker.finalizeWithError(serial, Error{name, msg.Body}) + tracker.finalizeWithError(serial, sequence, Error{name, msg.Body}) } return serial } @@ -856,7 +900,7 @@ func (tracker *callTracker) handleSendError(msg *Message, err error) { _, ok := tracker.calls[msg.serial] tracker.lck.RUnlock() if ok { - tracker.finalizeWithError(msg.serial, err) + tracker.finalizeWithError(msg.serial, NoSequence, err) } } @@ -871,7 +915,7 @@ func (tracker *callTracker) finalize(sn uint32) { } } -func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) { +func (tracker *callTracker) finalizeWithBody(sn uint32, sequence Sequence, body []interface{}) { tracker.lck.Lock() c, ok := tracker.calls[sn] if ok { @@ -880,11 +924,12 @@ func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) { tracker.lck.Unlock() if ok { c.Body = body + c.ResponseSequence = sequence c.done() } } 
-func (tracker *callTracker) finalizeWithError(sn uint32, err error) { +func (tracker *callTracker) finalizeWithError(sn uint32, sequence Sequence, err error) { tracker.lck.Lock() c, ok := tracker.calls[sn] if ok { @@ -893,11 +938,12 @@ func (tracker *callTracker) finalizeWithError(sn uint32, err error) { tracker.lck.Unlock() if ok { c.Err = err + c.ResponseSequence = sequence c.done() } } -func (tracker *callTracker) finalizeAllWithError(err error) { +func (tracker *callTracker) finalizeAllWithError(sequenceGen *sequenceGenerator, err error) { tracker.lck.Lock() closedCalls := make([]*Call, 0, len(tracker.calls)) for sn := range tracker.calls { @@ -907,6 +953,7 @@ func (tracker *callTracker) finalizeAllWithError(err error) { tracker.lck.Unlock() for _, call := range closedCalls { call.Err = err + call.ResponseSequence = sequenceGen.next() call.done() } } diff --git a/mantle/vendor/github.com/godbus/dbus/v5/dbus.go b/mantle/vendor/github.com/godbus/dbus/v5/dbus.go index 428923d2..ddf3b7af 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/dbus.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/dbus.go @@ -28,6 +28,7 @@ var ( interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() unixFDType = reflect.TypeOf(UnixFD(0)) unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) + errType = reflect.TypeOf((*error)(nil)).Elem() ) // An InvalidTypeError signals that a value which cannot be represented in the @@ -63,6 +64,9 @@ func storeInterfaces(src, dest interface{}) error { func store(dest, src reflect.Value) error { if dest.Kind() == reflect.Ptr { + if dest.IsNil() { + dest.Set(reflect.New(dest.Type().Elem())) + } return store(dest.Elem(), src) } switch src.Kind() { diff --git a/mantle/vendor/github.com/godbus/dbus/v5/default_handler.go b/mantle/vendor/github.com/godbus/dbus/v5/default_handler.go index 6d8bf32f..13132c6b 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/default_handler.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/default_handler.go @@ -126,14 
+126,28 @@ func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) { } ret := m.Value.Call(params) - - err := ret[t.NumOut()-1].Interface().(*Error) - ret = ret[:t.NumOut()-1] + var err error + nilErr := false // The reflection will find almost-nils, let's only pass back clean ones! + if t.NumOut() > 0 { + if e, ok := ret[t.NumOut()-1].Interface().(*Error); ok { // godbus *Error + nilErr = ret[t.NumOut()-1].IsNil() + ret = ret[:t.NumOut()-1] + err = e + } else if ret[t.NumOut()-1].Type().Implements(errType) { // Go error + i := ret[t.NumOut()-1].Interface() + if i == nil { + nilErr = ret[t.NumOut()-1].IsNil() + } else { + err = i.(error) + } + ret = ret[:t.NumOut()-1] + } + } out := make([]interface{}, len(ret)) for i, val := range ret { out[i] = val.Interface() } - if err == nil { + if nilErr || err == nil { //concrete type to interface nil is a special case return out, nil } diff --git a/mantle/vendor/github.com/godbus/dbus/v5/export.go b/mantle/vendor/github.com/godbus/dbus/v5/export.go index c277ab14..2447b51d 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/export.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/export.go @@ -69,6 +69,22 @@ func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Va return methods } +func getAllMethods(in interface{}, mapping map[string]string) map[string]reflect.Value { + if in == nil { + return nil + } + methods := make(map[string]reflect.Value) + val := reflect.ValueOf(in) + typ := val.Type() + for i := 0; i < typ.NumMethod(); i++ { + methtype := typ.Method(i) + method := val.Method(i) + // map names while building table + methods[computeMethodName(methtype.Name, mapping)] = method + } + return methods +} + func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) { pointers := make([]interface{}, m.NumArguments()) decode := make([]interface{}, 0, len(body)) @@ -159,7 +175,6 @@ func (conn *Conn) handleCall(msg *Message) { if 
msg.Flags&FlagNoReplyExpected == 0 { reply := new(Message) reply.Type = TypeMethodReply - reply.serial = conn.getSerial() reply.Headers = make(map[HeaderField]Variant) if hasSender { reply.Headers[FieldDestination] = msg.Headers[FieldSender] @@ -195,7 +210,6 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro } msg := new(Message) msg.Type = TypeSignal - msg.serial = conn.getSerial() msg.Headers = make(map[HeaderField]Variant) msg.Headers[FieldInterface] = MakeVariant(iface) msg.Headers[FieldMember] = MakeVariant(member) @@ -247,6 +261,18 @@ func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { return conn.ExportWithMap(v, nil, path, iface) } +// ExportAll registers all exported methods defined by the given object on +// the message bus. +// +// Unlike Export there is no requirement to have the last parameter as type +// *Error. If you want to be able to return error then you can append an error +// type parameter to your method signature. If the error returned is not nil, +// it is sent back to the caller as an error. Otherwise, a method reply is +// sent with the other return values as its body. +func (conn *Conn) ExportAll(v interface{}, path ObjectPath, iface string) error { + return conn.export(getAllMethods(v, nil), path, iface, false) +} + // ExportWithMap works exactly like Export but provides the ability to remap // method names (e.g. export a lower-case method). 
// @@ -299,19 +325,22 @@ func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path } func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error { - out := make(map[string]reflect.Value) - for name, method := range methods { - rval := reflect.ValueOf(method) - if rval.Kind() != reflect.Func { - continue - } - t := rval.Type() - // only track valid methods must return *Error as last arg - if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) { - continue + var out map[string]reflect.Value + if methods != nil { + out = make(map[string]reflect.Value) + for name, method := range methods { + rval := reflect.ValueOf(method) + if rval.Kind() != reflect.Func { + continue + } + t := rval.Type() + // only track valid methods must return *Error as last arg + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) { + continue + } + out[name] = rval } - out[name] = rval } return conn.export(out, path, iface, includeSubtree) } @@ -327,12 +356,12 @@ func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) err return nil } -// exportWithMap is the worker function for all exports/registrations. +// export is the worker function for all exports/registrations. func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error { h, ok := conn.handler.(*defaultHandler) if !ok { return fmt.Errorf( - `dbus: export only allowed on the default hander handler have %T"`, + `dbus: export only allowed on the default handler. 
Received: %T"`, conn.handler) } diff --git a/mantle/vendor/github.com/godbus/dbus/v5/go.mod b/mantle/vendor/github.com/godbus/dbus/v5/go.mod deleted file mode 100644 index 15b92020..00000000 --- a/mantle/vendor/github.com/godbus/dbus/v5/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/godbus/dbus/v5 - -go 1.12 diff --git a/mantle/vendor/github.com/godbus/dbus/v5/go.sum b/mantle/vendor/github.com/godbus/dbus/v5/go.sum deleted file mode 100644 index e69de29b..00000000 diff --git a/mantle/vendor/github.com/godbus/dbus/v5/match.go b/mantle/vendor/github.com/godbus/dbus/v5/match.go index 086ee336..5a607e53 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/match.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/match.go @@ -1,6 +1,7 @@ package dbus import ( + "strconv" "strings" ) @@ -60,3 +61,29 @@ func WithMatchPathNamespace(namespace ObjectPath) MatchOption { func WithMatchDestination(destination string) MatchOption { return WithMatchOption("destination", destination) } + +// WithMatchArg sets argN match option, range of N is 0 to 63. +func WithMatchArg(argIdx int, value string) MatchOption { + if argIdx < 0 || argIdx > 63 { + panic("range of argument index is 0 to 63") + } + return WithMatchOption("arg"+strconv.Itoa(argIdx), value) +} + +// WithMatchArgPath sets argN path match option, range of N is 0 to 63. +func WithMatchArgPath(argIdx int, path string) MatchOption { + if argIdx < 0 || argIdx > 63 { + panic("range of argument index is 0 to 63") + } + return WithMatchOption("arg"+strconv.Itoa(argIdx)+"path", path) +} + +// WithMatchArg0Namespace sets arg0namespace match option. +func WithMatchArg0Namespace(arg0Namespace string) MatchOption { + return WithMatchOption("arg0namespace", arg0Namespace) +} + +// WithMatchEavesdrop sets eavesdrop match option. 
+func WithMatchEavesdrop(eavesdrop bool) MatchOption { + return WithMatchOption("eavesdrop", strconv.FormatBool(eavesdrop)) +} diff --git a/mantle/vendor/github.com/godbus/dbus/v5/object.go b/mantle/vendor/github.com/godbus/dbus/v5/object.go index 8acd7fc8..664abb7f 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/object.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/object.go @@ -16,6 +16,7 @@ type BusObject interface { AddMatchSignal(iface, member string, options ...MatchOption) *Call RemoveMatchSignal(iface, member string, options ...MatchOption) *Call GetProperty(p string) (Variant, error) + StoreProperty(p string, value interface{}) error SetProperty(p string, v interface{}) error Destination() string Path() ObjectPath @@ -109,7 +110,6 @@ func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch method = method[i+1:] msg := new(Message) msg.Type = TypeMethodCall - msg.serial = o.conn.getSerial() msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected) msg.Headers = make(map[HeaderField]Variant) msg.Headers[FieldPath] = MakeVariant(o.path) @@ -122,68 +122,31 @@ func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch if len(args) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...)) } - if msg.Flags&FlagNoReplyExpected == 0 { - if ch == nil { - ch = make(chan *Call, 1) - } else if cap(ch) == 0 { - panic("dbus: unbuffered channel passed to (*Object).Go") - } - ctx, cancel := context.WithCancel(ctx) - call := &Call{ - Destination: o.dest, - Path: o.path, - Method: method, - Args: args, - Done: ch, - ctxCanceler: cancel, - ctx: ctx, - } - o.conn.calls.track(msg.serial, call) - o.conn.sendMessageAndIfClosed(msg, func() { - o.conn.calls.handleSendError(msg, ErrClosed) - cancel() - }) - go func() { - <-ctx.Done() - o.conn.calls.handleSendError(msg, ctx.Err()) - }() - - return call - } - done := make(chan *Call, 1) - call := &Call{ - Err: nil, - Done: done, - } - defer func() { - call.Done <- call - 
close(done) - }() - o.conn.sendMessageAndIfClosed(msg, func() { - call.Err = ErrClosed - }) - return call + return o.conn.SendWithContext(ctx, msg, ch) } // GetProperty calls org.freedesktop.DBus.Properties.Get on the given // object. The property name must be given in interface.member notation. func (o *Object) GetProperty(p string) (Variant, error) { + var result Variant + err := o.StoreProperty(p, &result) + return result, err +} + +// StoreProperty calls org.freedesktop.DBus.Properties.Get on the given +// object. The property name must be given in interface.member notation. +// It stores the returned property into the provided value. +func (o *Object) StoreProperty(p string, value interface{}) error { idx := strings.LastIndex(p, ".") if idx == -1 || idx+1 == len(p) { - return Variant{}, errors.New("dbus: invalid property " + p) + return errors.New("dbus: invalid property " + p) } iface := p[:idx] prop := p[idx+1:] - result := Variant{} - err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result) - - if err != nil { - return Variant{}, err - } - - return result, nil + return o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop). + Store(value) } // SetProperty calls org.freedesktop.DBus.Properties.Set on the given diff --git a/mantle/vendor/github.com/godbus/dbus/v5/sequence.go b/mantle/vendor/github.com/godbus/dbus/v5/sequence.go new file mode 100644 index 00000000..89435d39 --- /dev/null +++ b/mantle/vendor/github.com/godbus/dbus/v5/sequence.go @@ -0,0 +1,24 @@ +package dbus + +// Sequence represents the value of a monotonically increasing counter. +type Sequence uint64 + +const ( + // NoSequence indicates the absence of a sequence value. + NoSequence Sequence = 0 +) + +// sequenceGenerator represents a monotonically increasing counter. 
+type sequenceGenerator struct { + nextSequence Sequence +} + +func (generator *sequenceGenerator) next() Sequence { + result := generator.nextSequence + generator.nextSequence++ + return result +} + +func newSequenceGenerator() *sequenceGenerator { + return &sequenceGenerator{nextSequence: 1} +} diff --git a/mantle/vendor/github.com/godbus/dbus/v5/sequential_handler.go b/mantle/vendor/github.com/godbus/dbus/v5/sequential_handler.go new file mode 100644 index 00000000..ef2fcdba --- /dev/null +++ b/mantle/vendor/github.com/godbus/dbus/v5/sequential_handler.go @@ -0,0 +1,125 @@ +package dbus + +import ( + "sync" +) + +// NewSequentialSignalHandler returns an instance of a new +// signal handler that guarantees sequential processing of signals. It is a +// guarantee of this signal handler that signals will be written to +// channels in the order they are received on the DBus connection. +func NewSequentialSignalHandler() SignalHandler { + return &sequentialSignalHandler{} +} + +type sequentialSignalHandler struct { + mu sync.RWMutex + closed bool + signals []*sequentialSignalChannelData +} + +func (sh *sequentialSignalHandler) DeliverSignal(intf, name string, signal *Signal) { + sh.mu.RLock() + defer sh.mu.RUnlock() + if sh.closed { + return + } + for _, scd := range sh.signals { + scd.deliver(signal) + } +} + +func (sh *sequentialSignalHandler) Terminate() { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.closed { + return + } + + for _, scd := range sh.signals { + scd.close() + close(scd.ch) + } + sh.closed = true + sh.signals = nil +} + +func (sh *sequentialSignalHandler) AddSignal(ch chan<- *Signal) { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.closed { + return + } + sh.signals = append(sh.signals, newSequentialSignalChannelData(ch)) +} + +func (sh *sequentialSignalHandler) RemoveSignal(ch chan<- *Signal) { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.closed { + return + } + for i := len(sh.signals) - 1; i >= 0; i-- { + if ch == sh.signals[i].ch { + 
sh.signals[i].close() + copy(sh.signals[i:], sh.signals[i+1:]) + sh.signals[len(sh.signals)-1] = nil + sh.signals = sh.signals[:len(sh.signals)-1] + } + } +} + +type sequentialSignalChannelData struct { + ch chan<- *Signal + in chan *Signal + done chan struct{} +} + +func newSequentialSignalChannelData(ch chan<- *Signal) *sequentialSignalChannelData { + scd := &sequentialSignalChannelData{ + ch: ch, + in: make(chan *Signal), + done: make(chan struct{}), + } + go scd.bufferSignals() + return scd +} + +func (scd *sequentialSignalChannelData) bufferSignals() { + defer close(scd.done) + + // Ensure that signals are delivered to scd.ch in the same + // order they are received from scd.in. + var queue []*Signal + for { + if len(queue) == 0 { + signal, ok := <- scd.in + if !ok { + return + } + queue = append(queue, signal) + } + select { + case scd.ch <- queue[0]: + copy(queue, queue[1:]) + queue[len(queue)-1] = nil + queue = queue[:len(queue)-1] + case signal, ok := <-scd.in: + if !ok { + return + } + queue = append(queue, signal) + } + } +} + +func (scd *sequentialSignalChannelData) deliver(signal *Signal) { + scd.in <- signal +} + +func (scd *sequentialSignalChannelData) close() { + close(scd.in) + // Ensure that bufferSignals() has exited and won't attempt + // any future sends on scd.ch + <-scd.done +} diff --git a/mantle/vendor/github.com/godbus/dbus/v5/sig.go b/mantle/vendor/github.com/godbus/dbus/v5/sig.go index c1b80920..2d326ceb 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/sig.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/sig.go @@ -137,7 +137,7 @@ func ParseSignatureMust(s string) Signature { return sig } -// Empty retruns whether the signature is the empty signature. +// Empty returns whether the signature is the empty signature. 
func (s Signature) Empty() bool { return s.str == "" } diff --git a/mantle/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go b/mantle/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go index 0fc5b927..1b5ed208 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/transport_unixcred_freebsd.go @@ -10,6 +10,7 @@ package dbus /* const int sizeofPtr = sizeof(void*); #define _WANT_UCRED +#include #include */ import "C" diff --git a/mantle/vendor/github.com/godbus/dbus/v5/variant.go b/mantle/vendor/github.com/godbus/dbus/v5/variant.go index 5b51828c..f1e81f3e 100644 --- a/mantle/vendor/github.com/godbus/dbus/v5/variant.go +++ b/mantle/vendor/github.com/godbus/dbus/v5/variant.go @@ -142,3 +142,9 @@ func (v Variant) String() string { func (v Variant) Value() interface{} { return v.value } + +// Store converts the variant into a native go type using the same +// mechanism as the "Store" function. +func (v Variant) Store(value interface{}) error { + return storeInterfaces(v.value, value) +} diff --git a/mantle/vendor/github.com/google/uuid/go.mod b/mantle/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd79..00000000 --- a/mantle/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/mantle/vendor/github.com/googleapis/gax-go/v2/go.mod b/mantle/vendor/github.com/googleapis/gax-go/v2/go.mod deleted file mode 100644 index 9cdfaf44..00000000 --- a/mantle/vendor/github.com/googleapis/gax-go/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/googleapis/gax-go/v2 - -require google.golang.org/grpc v1.19.0 diff --git a/mantle/vendor/github.com/googleapis/gax-go/v2/go.sum b/mantle/vendor/github.com/googleapis/gax-go/v2/go.sum deleted file mode 100644 index 7fa23ecf..00000000 --- a/mantle/vendor/github.com/googleapis/gax-go/v2/go.sum +++ /dev/null @@ -1,25 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 
h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/mantle/vendor/github.com/gophercloud/gophercloud/go.mod b/mantle/vendor/github.com/gophercloud/gophercloud/go.mod deleted file mode 100644 index 8c83df23..00000000 --- a/mantle/vendor/github.com/gophercloud/gophercloud/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/gophercloud/gophercloud - -go 1.13 - -require ( - golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad - golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect - gopkg.in/yaml.v2 v2.3.0 -) diff --git a/mantle/vendor/github.com/gophercloud/gophercloud/go.sum b/mantle/vendor/github.com/gophercloud/gophercloud/go.sum deleted file mode 100644 index c91f7ee2..00000000 --- a/mantle/vendor/github.com/gophercloud/gophercloud/go.sum +++ /dev/null @@ -1,18 +0,0 @@ -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/mantle/vendor/github.com/jmespath/go-jmespath/go.mod b/mantle/vendor/github.com/jmespath/go-jmespath/go.mod deleted file mode 100644 index 4d448e88..00000000 --- a/mantle/vendor/github.com/jmespath/go-jmespath/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/jmespath/go-jmespath - -go 1.14 - -require github.com/jmespath/go-jmespath/internal/testify v1.5.1 diff --git a/mantle/vendor/github.com/jmespath/go-jmespath/go.sum b/mantle/vendor/github.com/jmespath/go-jmespath/go.sum deleted file mode 100644 index d2db411e..00000000 --- a/mantle/vendor/github.com/jmespath/go-jmespath/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/mantle/vendor/github.com/json-iterator/go/go.mod b/mantle/vendor/github.com/json-iterator/go/go.mod deleted file mode 100644 index e05c42ff..00000000 --- a/mantle/vendor/github.com/json-iterator/go/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/json-iterator/go - -go 1.12 - -require ( - github.com/davecgh/go-spew v1.1.1 - github.com/google/gofuzz v1.0.0 - github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 - github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 - github.com/stretchr/testify v1.3.0 -) diff --git a/mantle/vendor/github.com/json-iterator/go/go.sum b/mantle/vendor/github.com/json-iterator/go/go.sum deleted file mode 100644 index d778b5a1..00000000 --- a/mantle/vendor/github.com/json-iterator/go/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/mantle/vendor/github.com/klauspost/cpuid/.gitignore b/mantle/vendor/github.com/klauspost/cpuid/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/mantle/vendor/github.com/klauspost/cpuid/.travis.yml b/mantle/vendor/github.com/klauspost/cpuid/.travis.yml deleted file mode 100644 index 77d975fe..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/.travis.yml +++ /dev/null @@ -1,46 +0,0 @@ -language: go - -os: - - linux - - osx - - windows - -arch: - - amd64 - - arm64 - -go: - - 1.12.x - - 1.13.x - - 1.14.x - - master - -script: - - go vet ./... - - go test -race ./... - - go test -tags=noasm ./... 
- -stages: - - gofmt - - test - -matrix: - allow_failures: - - go: 'master' - fast_finish: true - include: - - stage: gofmt - go: 1.14.x - os: linux - arch: amd64 - script: - - diff <(gofmt -d .) <(printf "") - - diff <(gofmt -d ./private) <(printf "") - - go install github.com/klauspost/asmfmt/cmd/asmfmt - - diff <(asmfmt -d .) <(printf "") - - stage: i386 - go: 1.14.x - os: linux - arch: amd64 - script: - - GOOS=linux GOARCH=386 go test . diff --git a/mantle/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/mantle/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt deleted file mode 100644 index 2ef4714f..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt +++ /dev/null @@ -1,35 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2015- Klaus Post & Contributors. -Email: klauspost@gmail.com - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. 
- -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. diff --git a/mantle/vendor/github.com/klauspost/cpuid/LICENSE b/mantle/vendor/github.com/klauspost/cpuid/LICENSE deleted file mode 100644 index 5cec7ee9..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/mantle/vendor/github.com/klauspost/cpuid/README.md b/mantle/vendor/github.com/klauspost/cpuid/README.md deleted file mode 100644 index 38d4a8b9..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/README.md +++ /dev/null @@ -1,191 +0,0 @@ -# cpuid -Package cpuid provides information about the CPU running the current program. - -CPU features are detected on startup, and kept for fast access through the life of the application. -Currently x86 / x64 (AMD64/i386) and ARM (ARM64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. - -You can access the CPU information by accessing the shared CPU variable of the cpuid library. - -Package home: https://github.com/klauspost/cpuid - -[![GoDoc][1]][2] [![Build Status][3]][4] - -[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg -[2]: https://godoc.org/github.com/klauspost/cpuid -[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master -[4]: https://travis-ci.org/klauspost/cpuid - -# features - -## x86 CPU Instructions -* **CMOV** (i686 CMOV) -* **NX** (NX (No-Execute) bit) -* **AMD3DNOW** (AMD 3DNOW) -* **AMD3DNOWEXT** (AMD 3DNowExt) -* **MMX** (standard MMX) -* **MMXEXT** (SSE integer functions or AMD MMX ext) -* **SSE** (SSE functions) -* **SSE2** (P4 SSE functions) -* **SSE3** (Prescott SSE3 functions) -* **SSSE3** (Conroe SSSE3 functions) -* **SSE4** (Penryn SSE4.1 functions) -* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) -* **SSE42** (Nehalem SSE4.2 functions) -* **AVX** (AVX functions) -* **AVX2** (AVX2 functions) -* **FMA3** (Intel FMA 3) -* **FMA4** (Bulldozer FMA4 functions) -* **XOP** (Bulldozer XOP functions) -* **F16C** (Half-precision floating-point conversion) -* **BMI1** (Bit Manipulation Instruction Set 1) -* **BMI2** (Bit Manipulation Instruction Set 2) -* **TBM** (AMD Trailing Bit Manipulation) -* **LZCNT** (LZCNT instruction) -* **POPCNT** (POPCNT instruction) -* **AESNI** (Advanced Encryption Standard New 
Instructions) -* **CLMUL** (Carry-less Multiplication) -* **HTT** (Hyperthreading (enabled)) -* **HLE** (Hardware Lock Elision) -* **RTM** (Restricted Transactional Memory) -* **RDRAND** (RDRAND instruction is available) -* **RDSEED** (RDSEED instruction is available) -* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) -* **SHA** (Intel SHA Extensions) -* **AVX512F** (AVX-512 Foundation) -* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) -* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) -* **AVX512PF** (AVX-512 Prefetch Instructions) -* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) -* **AVX512CD** (AVX-512 Conflict Detection Instructions) -* **AVX512BW** (AVX-512 Byte and Word Instructions) -* **AVX512VL** (AVX-512 Vector Length Extensions) -* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) -* **AVX512VBMI2** (AVX-512 Vector Bit Manipulation Instructions, Version 2) -* **AVX512VNNI** (AVX-512 Vector Neural Network Instructions) -* **AVX512VPOPCNTDQ** (AVX-512 Vector Population Count Doubleword and Quadword) -* **GFNI** (Galois Field New Instructions) -* **VAES** (Vector AES) -* **AVX512BITALG** (AVX-512 Bit Algorithms) -* **VPCLMULQDQ** (Carry-Less Multiplication Quadword) -* **AVX512BF16** (AVX-512 BFLOAT16 Instructions) -* **AVX512VP2INTERSECT** (AVX-512 Intersect for D/Q) -* **MPX** (Intel MPX (Memory Protection Extensions)) -* **ERMS** (Enhanced REP MOVSB/STOSB) -* **RDTSCP** (RDTSCP Instruction) -* **CX16** (CMPXCHG16B Instruction) -* **SGX** (Software Guard Extensions, with activation details) -* **VMX** (Virtual Machine Extensions) - -## Performance -* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. -* **SSE2SLOW** (SSE2 is supported, but usually not faster) -* **SSE3SLOW** (SSE3 is supported, but usually not faster) -* **ATOM** (Atom processor, some SSSE3 instructions are slower) -* **Cache line** (Probable size of a cache line). 
-* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. - -## ARM CPU features - -# ARM FEATURE DETECTION DISABLED! - -See [#52](https://github.com/klauspost/cpuid/issues/52). - -Currently only `arm64` platforms are implemented. - -* **FP** Single-precision and double-precision floating point -* **ASIMD** Advanced SIMD -* **EVTSTRM** Generic timer -* **AES** AES instructions -* **PMULL** Polynomial Multiply instructions (PMULL/PMULL2) -* **SHA1** SHA-1 instructions (SHA1C, etc) -* **SHA2** SHA-2 instructions (SHA256H, etc) -* **CRC32** CRC32/CRC32C instructions -* **ATOMICS** Large System Extensions (LSE) -* **FPHP** Half-precision floating point -* **ASIMDHP** Advanced SIMD half-precision floating point -* **ARMCPUID** Some CPU ID registers readable at user-level -* **ASIMDRDM** Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) -* **JSCVT** Javascript-style double->int convert (FJCVTZS) -* **FCMA** Floating point complex number addition and multiplication -* **LRCPC** Weaker release consistency (LDAPR, etc) -* **DCPOP** Data cache clean to Point of Persistence (DC CVAP) -* **SHA3** SHA-3 instructions (EOR3, RAXI, XAR, BCAX) -* **SM3** SM3 instructions -* **SM4** SM4 instructions -* **ASIMDDP** SIMD Dot Product -* **SHA512** SHA512 instructions -* **SVE** Scalable Vector Extension -* **GPA** Generic Pointer Authentication - -## Cpu Vendor/VM -* **Intel** -* **AMD** -* **VIA** -* **Transmeta** -* **NSC** -* **KVM** (Kernel-based Virtual Machine) -* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) -* **VMware** -* **XenHVM** -* **Bhyve** -* **Hygon** - -# installing - -```go get github.com/klauspost/cpuid``` - -# example - -```Go -package main - -import ( - "fmt" - "github.com/klauspost/cpuid" -) - -func main() { - // Print basic CPU information: - fmt.Println("Name:", cpuid.CPU.BrandName) - fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) - fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) - fmt.Println("LogicalCores:", 
cpuid.CPU.LogicalCores) - fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) - fmt.Println("Features:", cpuid.CPU.Features) - fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) - fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") - fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") - - // Test if we have a specific feature: - if cpuid.CPU.SSE() { - fmt.Println("We have Streaming SIMD Extensions") - } -} -``` - -Sample output: -``` ->go run main.go -Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz -PhysicalCores: 2 -ThreadsPerCore: 2 -LogicalCores: 4 -Family 6 Model: 42 -Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL -Cacheline bytes: 64 -We have Streaming SIMD Extensions -``` - -# private package - -In the "private" folder you can find an autogenerated version of the library you can include in your own packages. - -For this purpose all exports are removed, and functions and constants are lowercased. - -This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. - -# license - -This code is published under an MIT license. See LICENSE file for more information. diff --git a/mantle/vendor/github.com/klauspost/cpuid/cpuid.go b/mantle/vendor/github.com/klauspost/cpuid/cpuid.go deleted file mode 100644 index 208b3e79..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/cpuid.go +++ /dev/null @@ -1,1504 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// Package cpuid provides information about the CPU running the current program. -// -// CPU features are detected on startup, and kept for fast access through the life of the application. -// Currently x86 / x64 (AMD64) as well as arm64 is supported. 
-// -// You can access the CPU information by accessing the shared CPU variable of the cpuid library. -// -// Package home: https://github.com/klauspost/cpuid -package cpuid - -import ( - "math" - "strings" -) - -// AMD refererence: https://www.amd.com/system/files/TechDocs/25481.pdf -// and Processor Programming Reference (PPR) - -// Vendor is a representation of a CPU vendor. -type Vendor int - -const ( - Other Vendor = iota - Intel - AMD - VIA - Transmeta - NSC - KVM // Kernel-based Virtual Machine - MSVM // Microsoft Hyper-V or Windows Virtual PC - VMware - XenHVM - Bhyve - Hygon - SiS - RDC -) - -const ( - CMOV = 1 << iota // i686 CMOV - NX // NX (No-Execute) bit - AMD3DNOW // AMD 3DNOW - AMD3DNOWEXT // AMD 3DNowExt - MMX // standard MMX - MMXEXT // SSE integer functions or AMD MMX ext - SSE // SSE functions - SSE2 // P4 SSE functions - SSE3 // Prescott SSE3 functions - SSSE3 // Conroe SSSE3 functions - SSE4 // Penryn SSE4.1 functions - SSE4A // AMD Barcelona microarchitecture SSE4a instructions - SSE42 // Nehalem SSE4.2 functions - AVX // AVX functions - AVX2 // AVX2 functions - FMA3 // Intel FMA 3 - FMA4 // Bulldozer FMA4 functions - XOP // Bulldozer XOP functions - F16C // Half-precision floating-point conversion - BMI1 // Bit Manipulation Instruction Set 1 - BMI2 // Bit Manipulation Instruction Set 2 - TBM // AMD Trailing Bit Manipulation - LZCNT // LZCNT instruction - POPCNT // POPCNT instruction - AESNI // Advanced Encryption Standard New Instructions - CLMUL // Carry-less Multiplication - HTT // Hyperthreading (enabled) - HLE // Hardware Lock Elision - RTM // Restricted Transactional Memory - RDRAND // RDRAND instruction is available - RDSEED // RDSEED instruction is available - ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - SHA // Intel SHA Extensions - AVX512F // AVX-512 Foundation - AVX512DQ // AVX-512 Doubleword and Quadword Instructions - AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF // AVX-512 
Prefetch Instructions - AVX512ER // AVX-512 Exponential and Reciprocal Instructions - AVX512CD // AVX-512 Conflict Detection Instructions - AVX512BW // AVX-512 Byte and Word Instructions - AVX512VL // AVX-512 Vector Length Extensions - AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions - AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 - AVX512VNNI // AVX-512 Vector Neural Network Instructions - AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword - GFNI // Galois Field New Instructions - VAES // Vector AES - AVX512BITALG // AVX-512 Bit Algorithms - VPCLMULQDQ // Carry-Less Multiplication Quadword - AVX512BF16 // AVX-512 BFLOAT16 Instructions - AVX512VP2INTERSECT // AVX-512 Intersect for D/Q - MPX // Intel MPX (Memory Protection Extensions) - ERMS // Enhanced REP MOVSB/STOSB - RDTSCP // RDTSCP Instruction - CX16 // CMPXCHG16B Instruction - SGX // Software Guard Extensions - SGXLC // Software Guard Extensions Launch Control - IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) - STIBP // Single Thread Indirect Branch Predictors - VMX // Virtual Machine Extensions - - // Performance indicators - SSE2SLOW // SSE2 is supported, but usually not faster - SSE3SLOW // SSE3 is supported, but usually not faster - ATOM // Atom processor, some SSSE3 instructions are slower -) - -var flagNames = map[Flags]string{ - CMOV: "CMOV", // i686 CMOV - NX: "NX", // NX (No-Execute) bit - AMD3DNOW: "AMD3DNOW", // AMD 3DNOW - AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt - MMX: "MMX", // Standard MMX - MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext - SSE: "SSE", // SSE functions - SSE2: "SSE2", // P4 SSE2 functions - SSE3: "SSE3", // Prescott SSE3 functions - SSSE3: "SSSE3", // Conroe SSSE3 functions - SSE4: "SSE4.1", // Penryn SSE4.1 functions - SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions - SSE42: "SSE4.2", // Nehalem SSE4.2 functions - AVX: "AVX", // AVX 
functions - AVX2: "AVX2", // AVX functions - FMA3: "FMA3", // Intel FMA 3 - FMA4: "FMA4", // Bulldozer FMA4 functions - XOP: "XOP", // Bulldozer XOP functions - F16C: "F16C", // Half-precision floating-point conversion - BMI1: "BMI1", // Bit Manipulation Instruction Set 1 - BMI2: "BMI2", // Bit Manipulation Instruction Set 2 - TBM: "TBM", // AMD Trailing Bit Manipulation - LZCNT: "LZCNT", // LZCNT instruction - POPCNT: "POPCNT", // POPCNT instruction - AESNI: "AESNI", // Advanced Encryption Standard New Instructions - CLMUL: "CLMUL", // Carry-less Multiplication - HTT: "HTT", // Hyperthreading (enabled) - HLE: "HLE", // Hardware Lock Elision - RTM: "RTM", // Restricted Transactional Memory - RDRAND: "RDRAND", // RDRAND instruction is available - RDSEED: "RDSEED", // RDSEED instruction is available - ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - SHA: "SHA", // Intel SHA Extensions - AVX512F: "AVX512F", // AVX-512 Foundation - AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions - AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions - AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions - AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions - AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions - AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions - AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions - AVX512VBMI2: "AVX512VBMI2", // AVX-512 Vector Bit Manipulation Instructions, Version 2 - AVX512VNNI: "AVX512VNNI", // AVX-512 Vector Neural Network Instructions - AVX512VPOPCNTDQ: "AVX512VPOPCNTDQ", // AVX-512 Vector Population Count Doubleword and Quadword - GFNI: "GFNI", // Galois Field New Instructions - VAES: "VAES", // Vector AES - AVX512BITALG: "AVX512BITALG", // AVX-512 Bit Algorithms - VPCLMULQDQ: "VPCLMULQDQ", // Carry-Less Multiplication Quadword - AVX512BF16: 
"AVX512BF16", // AVX-512 BFLOAT16 Instruction - AVX512VP2INTERSECT: "AVX512VP2INTERSECT", // AVX-512 Intersect for D/Q - MPX: "MPX", // Intel MPX (Memory Protection Extensions) - ERMS: "ERMS", // Enhanced REP MOVSB/STOSB - RDTSCP: "RDTSCP", // RDTSCP Instruction - CX16: "CX16", // CMPXCHG16B Instruction - SGX: "SGX", // Software Guard Extensions - SGXLC: "SGXLC", // Software Guard Extensions Launch Control - IBPB: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier - STIBP: "STIBP", // Single Thread Indirect Branch Predictors - VMX: "VMX", // Virtual Machine Extensions - - // Performance indicators - SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster - SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster - ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower - -} - -/* all special features for arm64 should be defined here */ -const ( - /* extension instructions */ - FP ArmFlags = 1 << iota - ASIMD - EVTSTRM - AES - PMULL - SHA1 - SHA2 - CRC32 - ATOMICS - FPHP - ASIMDHP - ARMCPUID - ASIMDRDM - JSCVT - FCMA - LRCPC - DCPOP - SHA3 - SM3 - SM4 - ASIMDDP - SHA512 - SVE - GPA -) - -var flagNamesArm = map[ArmFlags]string{ - FP: "FP", // Single-precision and double-precision floating point - ASIMD: "ASIMD", // Advanced SIMD - EVTSTRM: "EVTSTRM", // Generic timer - AES: "AES", // AES instructions - PMULL: "PMULL", // Polynomial Multiply instructions (PMULL/PMULL2) - SHA1: "SHA1", // SHA-1 instructions (SHA1C, etc) - SHA2: "SHA2", // SHA-2 instructions (SHA256H, etc) - CRC32: "CRC32", // CRC32/CRC32C instructions - ATOMICS: "ATOMICS", // Large System Extensions (LSE) - FPHP: "FPHP", // Half-precision floating point - ASIMDHP: "ASIMDHP", // Advanced SIMD half-precision floating point - ARMCPUID: "CPUID", // Some CPU ID registers readable at user-level - ASIMDRDM: "ASIMDRDM", // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) - JSCVT: "JSCVT", // Javascript-style double->int convert 
(FJCVTZS) - FCMA: "FCMA", // Floatin point complex number addition and multiplication - LRCPC: "LRCPC", // Weaker release consistency (LDAPR, etc) - DCPOP: "DCPOP", // Data cache clean to Point of Persistence (DC CVAP) - SHA3: "SHA3", // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) - SM3: "SM3", // SM3 instructions - SM4: "SM4", // SM4 instructions - ASIMDDP: "ASIMDDP", // SIMD Dot Product - SHA512: "SHA512", // SHA512 instructions - SVE: "SVE", // Scalable Vector Extension - GPA: "GPA", // Generic Pointer Authentication -} - -// CPUInfo contains information about the detected system CPU. -type CPUInfo struct { - BrandName string // Brand name reported by the CPU - VendorID Vendor // Comparable CPU vendor ID - VendorString string // Raw vendor string. - Features Flags // Features of the CPU (x64) - Arm ArmFlags // Features of the CPU (arm) - PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. - ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. - LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. - Family int // CPU family number - Model int // CPU model number - CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Hz int64 // Clock speed, if known - Cache struct { - L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected - L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected - L2 int // L2 Cache (per core or shared). Will be -1 if undetected - L3 int // L3 Cache (per core, per ccx or shared). 
Will be -1 if undetected - } - SGX SGXSupport - maxFunc uint32 - maxExFunc uint32 -} - -var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) -var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) -var xgetbv func(index uint32) (eax, edx uint32) -var rdtscpAsm func() (eax, ebx, ecx, edx uint32) - -// CPU contains information about the CPU as detected on startup, -// or when Detect last was called. -// -// Use this as the primary entry point to you data. -var CPU CPUInfo - -func init() { - initCPU() - Detect() -} - -// Detect will re-detect current CPU info. -// This will replace the content of the exported CPU variable. -// -// Unless you expect the CPU to change while you are running your program -// you should not need to call this function. -// If you call this, you must ensure that no other goroutine is accessing the -// exported CPU variable. -func Detect() { - // Set defaults - CPU.ThreadsPerCore = 1 - CPU.Cache.L1I = -1 - CPU.Cache.L1D = -1 - CPU.Cache.L2 = -1 - CPU.Cache.L3 = -1 - addInfo(&CPU) -} - -// Generated here: http://play.golang.org/p/BxFH2Gdc0G - -// Cmov indicates support of CMOV instructions -func (c CPUInfo) Cmov() bool { - return c.Features&CMOV != 0 -} - -// Amd3dnow indicates support of AMD 3DNOW! instructions -func (c CPUInfo) Amd3dnow() bool { - return c.Features&AMD3DNOW != 0 -} - -// Amd3dnowExt indicates support of AMD 3DNOW! 
Extended instructions -func (c CPUInfo) Amd3dnowExt() bool { - return c.Features&AMD3DNOWEXT != 0 -} - -// VMX indicates support of VMX -func (c CPUInfo) VMX() bool { - return c.Features&VMX != 0 -} - -// MMX indicates support of MMX instructions -func (c CPUInfo) MMX() bool { - return c.Features&MMX != 0 -} - -// MMXExt indicates support of MMXEXT instructions -// (SSE integer functions or AMD MMX ext) -func (c CPUInfo) MMXExt() bool { - return c.Features&MMXEXT != 0 -} - -// SSE indicates support of SSE instructions -func (c CPUInfo) SSE() bool { - return c.Features&SSE != 0 -} - -// SSE2 indicates support of SSE 2 instructions -func (c CPUInfo) SSE2() bool { - return c.Features&SSE2 != 0 -} - -// SSE3 indicates support of SSE 3 instructions -func (c CPUInfo) SSE3() bool { - return c.Features&SSE3 != 0 -} - -// SSSE3 indicates support of SSSE 3 instructions -func (c CPUInfo) SSSE3() bool { - return c.Features&SSSE3 != 0 -} - -// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions -func (c CPUInfo) SSE4() bool { - return c.Features&SSE4 != 0 -} - -// SSE42 indicates support of SSE4.2 instructions -func (c CPUInfo) SSE42() bool { - return c.Features&SSE42 != 0 -} - -// AVX indicates support of AVX instructions -// and operating system support of AVX instructions -func (c CPUInfo) AVX() bool { - return c.Features&AVX != 0 -} - -// AVX2 indicates support of AVX2 instructions -func (c CPUInfo) AVX2() bool { - return c.Features&AVX2 != 0 -} - -// FMA3 indicates support of FMA3 instructions -func (c CPUInfo) FMA3() bool { - return c.Features&FMA3 != 0 -} - -// FMA4 indicates support of FMA4 instructions -func (c CPUInfo) FMA4() bool { - return c.Features&FMA4 != 0 -} - -// XOP indicates support of XOP instructions -func (c CPUInfo) XOP() bool { - return c.Features&XOP != 0 -} - -// F16C indicates support of F16C instructions -func (c CPUInfo) F16C() bool { - return c.Features&F16C != 0 -} - -// BMI1 indicates support of BMI1 instructions -func (c CPUInfo) 
BMI1() bool { - return c.Features&BMI1 != 0 -} - -// BMI2 indicates support of BMI2 instructions -func (c CPUInfo) BMI2() bool { - return c.Features&BMI2 != 0 -} - -// TBM indicates support of TBM instructions -// (AMD Trailing Bit Manipulation) -func (c CPUInfo) TBM() bool { - return c.Features&TBM != 0 -} - -// Lzcnt indicates support of LZCNT instruction -func (c CPUInfo) Lzcnt() bool { - return c.Features&LZCNT != 0 -} - -// Popcnt indicates support of POPCNT instruction -func (c CPUInfo) Popcnt() bool { - return c.Features&POPCNT != 0 -} - -// HTT indicates the processor has Hyperthreading enabled -func (c CPUInfo) HTT() bool { - return c.Features&HTT != 0 -} - -// SSE2Slow indicates that SSE2 may be slow on this processor -func (c CPUInfo) SSE2Slow() bool { - return c.Features&SSE2SLOW != 0 -} - -// SSE3Slow indicates that SSE3 may be slow on this processor -func (c CPUInfo) SSE3Slow() bool { - return c.Features&SSE3SLOW != 0 -} - -// AesNi indicates support of AES-NI instructions -// (Advanced Encryption Standard New Instructions) -func (c CPUInfo) AesNi() bool { - return c.Features&AESNI != 0 -} - -// Clmul indicates support of CLMUL instructions -// (Carry-less Multiplication) -func (c CPUInfo) Clmul() bool { - return c.Features&CLMUL != 0 -} - -// NX indicates support of NX (No-Execute) bit -func (c CPUInfo) NX() bool { - return c.Features&NX != 0 -} - -// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions -func (c CPUInfo) SSE4A() bool { - return c.Features&SSE4A != 0 -} - -// HLE indicates support of Hardware Lock Elision -func (c CPUInfo) HLE() bool { - return c.Features&HLE != 0 -} - -// RTM indicates support of Restricted Transactional Memory -func (c CPUInfo) RTM() bool { - return c.Features&RTM != 0 -} - -// Rdrand indicates support of RDRAND instruction is available -func (c CPUInfo) Rdrand() bool { - return c.Features&RDRAND != 0 -} - -// Rdseed indicates support of RDSEED instruction is available -func (c CPUInfo) 
Rdseed() bool { - return c.Features&RDSEED != 0 -} - -// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) -func (c CPUInfo) ADX() bool { - return c.Features&ADX != 0 -} - -// SHA indicates support of Intel SHA Extensions -func (c CPUInfo) SHA() bool { - return c.Features&SHA != 0 -} - -// AVX512F indicates support of AVX-512 Foundation -func (c CPUInfo) AVX512F() bool { - return c.Features&AVX512F != 0 -} - -// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions -func (c CPUInfo) AVX512DQ() bool { - return c.Features&AVX512DQ != 0 -} - -// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions -func (c CPUInfo) AVX512IFMA() bool { - return c.Features&AVX512IFMA != 0 -} - -// AVX512PF indicates support of AVX-512 Prefetch Instructions -func (c CPUInfo) AVX512PF() bool { - return c.Features&AVX512PF != 0 -} - -// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions -func (c CPUInfo) AVX512ER() bool { - return c.Features&AVX512ER != 0 -} - -// AVX512CD indicates support of AVX-512 Conflict Detection Instructions -func (c CPUInfo) AVX512CD() bool { - return c.Features&AVX512CD != 0 -} - -// AVX512BW indicates support of AVX-512 Byte and Word Instructions -func (c CPUInfo) AVX512BW() bool { - return c.Features&AVX512BW != 0 -} - -// AVX512VL indicates support of AVX-512 Vector Length Extensions -func (c CPUInfo) AVX512VL() bool { - return c.Features&AVX512VL != 0 -} - -// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions -func (c CPUInfo) AVX512VBMI() bool { - return c.Features&AVX512VBMI != 0 -} - -// AVX512VBMI2 indicates support of AVX-512 Vector Bit Manipulation Instructions, Version 2 -func (c CPUInfo) AVX512VBMI2() bool { - return c.Features&AVX512VBMI2 != 0 -} - -// AVX512VNNI indicates support of AVX-512 Vector Neural Network Instructions -func (c CPUInfo) AVX512VNNI() bool { - return c.Features&AVX512VNNI != 0 -} - -// 
AVX512VPOPCNTDQ indicates support of AVX-512 Vector Population Count Doubleword and Quadword -func (c CPUInfo) AVX512VPOPCNTDQ() bool { - return c.Features&AVX512VPOPCNTDQ != 0 -} - -// GFNI indicates support of Galois Field New Instructions -func (c CPUInfo) GFNI() bool { - return c.Features&GFNI != 0 -} - -// VAES indicates support of Vector AES -func (c CPUInfo) VAES() bool { - return c.Features&VAES != 0 -} - -// AVX512BITALG indicates support of AVX-512 Bit Algorithms -func (c CPUInfo) AVX512BITALG() bool { - return c.Features&AVX512BITALG != 0 -} - -// VPCLMULQDQ indicates support of Carry-Less Multiplication Quadword -func (c CPUInfo) VPCLMULQDQ() bool { - return c.Features&VPCLMULQDQ != 0 -} - -// AVX512BF16 indicates support of -func (c CPUInfo) AVX512BF16() bool { - return c.Features&AVX512BF16 != 0 -} - -// AVX512VP2INTERSECT indicates support of -func (c CPUInfo) AVX512VP2INTERSECT() bool { - return c.Features&AVX512VP2INTERSECT != 0 -} - -// MPX indicates support of Intel MPX (Memory Protection Extensions) -func (c CPUInfo) MPX() bool { - return c.Features&MPX != 0 -} - -// ERMS indicates support of Enhanced REP MOVSB/STOSB -func (c CPUInfo) ERMS() bool { - return c.Features&ERMS != 0 -} - -// RDTSCP Instruction is available. -func (c CPUInfo) RDTSCP() bool { - return c.Features&RDTSCP != 0 -} - -// CX16 indicates if CMPXCHG16B instruction is available. -func (c CPUInfo) CX16() bool { - return c.Features&CX16 != 0 -} - -// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. -// So TSX simply checks that. 
-func (c CPUInfo) TSX() bool { - return c.Features&(HLE|RTM) == HLE|RTM -} - -// Atom indicates an Atom processor -func (c CPUInfo) Atom() bool { - return c.Features&ATOM != 0 -} - -// Intel returns true if vendor is recognized as Intel -func (c CPUInfo) Intel() bool { - return c.VendorID == Intel -} - -// AMD returns true if vendor is recognized as AMD -func (c CPUInfo) AMD() bool { - return c.VendorID == AMD -} - -// Hygon returns true if vendor is recognized as Hygon -func (c CPUInfo) Hygon() bool { - return c.VendorID == Hygon -} - -// Transmeta returns true if vendor is recognized as Transmeta -func (c CPUInfo) Transmeta() bool { - return c.VendorID == Transmeta -} - -// NSC returns true if vendor is recognized as National Semiconductor -func (c CPUInfo) NSC() bool { - return c.VendorID == NSC -} - -// VIA returns true if vendor is recognized as VIA -func (c CPUInfo) VIA() bool { - return c.VendorID == VIA -} - -// RTCounter returns the 64-bit time-stamp counter -// Uses the RDTSCP instruction. The value 0 is returned -// if the CPU does not support the instruction. -func (c CPUInfo) RTCounter() uint64 { - if !c.RDTSCP() { - return 0 - } - a, _, _, d := rdtscpAsm() - return uint64(a) | (uint64(d) << 32) -} - -// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. -// This variable is OS dependent, but on Linux contains information -// about the current cpu/core the code is running on. -// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. -func (c CPUInfo) Ia32TscAux() uint32 { - if !c.RDTSCP() { - return 0 - } - _, _, ecx, _ := rdtscpAsm() - return ecx -} - -// LogicalCPU will return the Logical CPU the code is currently executing on. -// This is likely to change when the OS re-schedules the running thread -// to another CPU. -// If the current core cannot be detected, -1 will be returned. 
-func (c CPUInfo) LogicalCPU() int { - if c.maxFunc < 1 { - return -1 - } - _, ebx, _, _ := cpuid(1) - return int(ebx >> 24) -} - -// hertz tries to compute the clock speed of the CPU. If leaf 15 is -// supported, use it, otherwise parse the brand string. Yes, really. -func hertz(model string) int64 { - mfi := maxFunctionID() - if mfi >= 0x15 { - eax, ebx, ecx, _ := cpuid(0x15) - if eax != 0 && ebx != 0 && ecx != 0 { - return int64((int64(ecx) * int64(ebx)) / int64(eax)) - } - } - // computeHz determines the official rated speed of a CPU from its brand - // string. This insanity is *actually the official documented way to do - // this according to Intel*, prior to leaf 0x15 existing. The official - // documentation only shows this working for exactly `x.xx` or `xxxx` - // cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other - // sizes. - hz := strings.LastIndex(model, "Hz") - if hz < 3 { - return -1 - } - var multiplier int64 - switch model[hz-1] { - case 'M': - multiplier = 1000 * 1000 - case 'G': - multiplier = 1000 * 1000 * 1000 - case 'T': - multiplier = 1000 * 1000 * 1000 * 1000 - } - if multiplier == 0 { - return -1 - } - freq := int64(0) - divisor := int64(0) - decimalShift := int64(1) - var i int - for i = hz - 2; i >= 0 && model[i] != ' '; i-- { - if model[i] >= '0' && model[i] <= '9' { - freq += int64(model[i]-'0') * decimalShift - decimalShift *= 10 - } else if model[i] == '.' { - if divisor != 0 { - return -1 - } - divisor = decimalShift - } else { - return -1 - } - } - // we didn't find a space - if i < 0 { - return -1 - } - if divisor != 0 { - return (freq * multiplier) / divisor - } - return freq * multiplier -} - -// VM Will return true if the cpu id indicates we are in -// a virtual machine. This is only a hint, and will very likely -// have many false negatives. 
-func (c CPUInfo) VM() bool { - switch c.VendorID { - case MSVM, KVM, VMware, XenHVM, Bhyve: - return true - } - return false -} - -// Flags contains detected cpu features and characteristics -type Flags uint64 - -// ArmFlags contains detected ARM cpu features and characteristics -type ArmFlags uint64 - -// String returns a string representation of the detected -// CPU features. -func (f Flags) String() string { - return strings.Join(f.Strings(), ",") -} - -// Strings returns an array of the detected features. -func (f Flags) Strings() []string { - r := make([]string, 0, 20) - for i := uint(0); i < 64; i++ { - key := Flags(1 << i) - val := flagNames[key] - if f&key != 0 { - r = append(r, val) - } - } - return r -} - -// String returns a string representation of the detected -// CPU features. -func (f ArmFlags) String() string { - return strings.Join(f.Strings(), ",") -} - -// Strings returns an array of the detected features. -func (f ArmFlags) Strings() []string { - r := make([]string, 0, 20) - for i := uint(0); i < 64; i++ { - key := ArmFlags(1 << i) - val := flagNamesArm[key] - if f&key != 0 { - r = append(r, val) - } - } - return r -} -func maxExtendedFunction() uint32 { - eax, _, _, _ := cpuid(0x80000000) - return eax -} - -func maxFunctionID() uint32 { - a, _, _, _ := cpuid(0) - return a -} - -func brandName() string { - if maxExtendedFunction() >= 0x80000004 { - v := make([]uint32, 0, 48) - for i := uint32(0); i < 3; i++ { - a, b, c, d := cpuid(0x80000002 + i) - v = append(v, a, b, c, d) - } - return strings.Trim(string(valAsString(v...)), " ") - } - return "unknown" -} - -func threadsPerCore() int { - mfi := maxFunctionID() - vend, _ := vendorID() - - if mfi < 0x4 || (vend != Intel && vend != AMD) { - return 1 - } - - if mfi < 0xb { - if vend != Intel { - return 1 - } - _, b, _, d := cpuid(1) - if (d & (1 << 28)) != 0 { - // v will contain logical core count - v := (b >> 16) & 255 - if v > 1 { - a4, _, _, _ := cpuid(4) - // physical cores - v2 := (a4 >> 26) 
+ 1 - if v2 > 0 { - return int(v) / int(v2) - } - } - } - return 1 - } - _, b, _, _ := cpuidex(0xb, 0) - if b&0xffff == 0 { - return 1 - } - return int(b & 0xffff) -} - -func logicalCores() int { - mfi := maxFunctionID() - v, _ := vendorID() - switch v { - case Intel: - // Use this on old Intel processors - if mfi < 0xb { - if mfi < 1 { - return 0 - } - // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) - // that can be assigned to logical processors in a physical package. - // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. - _, ebx, _, _ := cpuid(1) - logical := (ebx >> 16) & 0xff - return int(logical) - } - _, b, _, _ := cpuidex(0xb, 1) - return int(b & 0xffff) - case AMD, Hygon: - _, b, _, _ := cpuid(1) - return int((b >> 16) & 0xff) - default: - return 0 - } -} - -func familyModel() (int, int) { - if maxFunctionID() < 0x1 { - return 0, 0 - } - eax, _, _, _ := cpuid(1) - family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) - model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) - return int(family), int(model) -} - -func physicalCores() int { - v, _ := vendorID() - switch v { - case Intel: - return logicalCores() / threadsPerCore() - case AMD, Hygon: - lc := logicalCores() - tpc := threadsPerCore() - if lc > 0 && tpc > 0 { - return lc / tpc - } - // The following is inaccurate on AMD EPYC 7742 64-Core Processor - - if maxExtendedFunction() >= 0x80000008 { - _, _, c, _ := cpuid(0x80000008) - return int(c&0xff) + 1 - } - } - return 0 -} - -// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID -var vendorMapping = map[string]Vendor{ - "AMDisbetter!": AMD, - "AuthenticAMD": AMD, - "CentaurHauls": VIA, - "GenuineIntel": Intel, - "TransmetaCPU": Transmeta, - "GenuineTMx86": Transmeta, - "Geode by NSC": NSC, - "VIA VIA VIA ": VIA, - "KVMKVMKVMKVM": KVM, - "Microsoft Hv": MSVM, - "VMwareVMware": VMware, - "XenVMMXenVMM": XenHVM, - "bhyve bhyve 
": Bhyve, - "HygonGenuine": Hygon, - "Vortex86 SoC": SiS, - "SiS SiS SiS ": SiS, - "RiseRiseRise": SiS, - "Genuine RDC": RDC, -} - -func vendorID() (Vendor, string) { - _, b, c, d := cpuid(0) - v := string(valAsString(b, d, c)) - vend, ok := vendorMapping[v] - if !ok { - return Other, v - } - return vend, v -} - -func cacheLine() int { - if maxFunctionID() < 0x1 { - return 0 - } - - _, ebx, _, _ := cpuid(1) - cache := (ebx & 0xff00) >> 5 // cflush size - if cache == 0 && maxExtendedFunction() >= 0x80000006 { - _, _, ecx, _ := cpuid(0x80000006) - cache = ecx & 0xff // cacheline size - } - // TODO: Read from Cache and TLB Information - return int(cache) -} - -func (c *CPUInfo) cacheSize() { - c.Cache.L1D = -1 - c.Cache.L1I = -1 - c.Cache.L2 = -1 - c.Cache.L3 = -1 - vendor, _ := vendorID() - switch vendor { - case Intel: - if maxFunctionID() < 4 { - return - } - for i := uint32(0); ; i++ { - eax, ebx, ecx, _ := cpuidex(4, i) - cacheType := eax & 15 - if cacheType == 0 { - break - } - cacheLevel := (eax >> 5) & 7 - coherency := int(ebx&0xfff) + 1 - partitions := int((ebx>>12)&0x3ff) + 1 - associativity := int((ebx>>22)&0x3ff) + 1 - sets := int(ecx) + 1 - size := associativity * partitions * coherency * sets - switch cacheLevel { - case 1: - if cacheType == 1 { - // 1 = Data Cache - c.Cache.L1D = size - } else if cacheType == 2 { - // 2 = Instruction Cache - c.Cache.L1I = size - } else { - if c.Cache.L1D < 0 { - c.Cache.L1I = size - } - if c.Cache.L1I < 0 { - c.Cache.L1I = size - } - } - case 2: - c.Cache.L2 = size - case 3: - c.Cache.L3 = size - } - } - case AMD, Hygon: - // Untested. 
- if maxExtendedFunction() < 0x80000005 { - return - } - _, _, ecx, edx := cpuid(0x80000005) - c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) - c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) - - if maxExtendedFunction() < 0x80000006 { - return - } - _, _, ecx, _ = cpuid(0x80000006) - c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) - - // CPUID Fn8000_001D_EAX_x[N:0] Cache Properties - if maxExtendedFunction() < 0x8000001D { - return - } - for i := uint32(0); i < math.MaxUint32; i++ { - eax, ebx, ecx, _ := cpuidex(0x8000001D, i) - - level := (eax >> 5) & 7 - cacheNumSets := ecx + 1 - cacheLineSize := 1 + (ebx & 2047) - cachePhysPartitions := 1 + ((ebx >> 12) & 511) - cacheNumWays := 1 + ((ebx >> 22) & 511) - - typ := eax & 15 - size := int(cacheNumSets * cacheLineSize * cachePhysPartitions * cacheNumWays) - if typ == 0 { - return - } - - switch level { - case 1: - switch typ { - case 1: - // Data cache - c.Cache.L1D = size - case 2: - // Inst cache - c.Cache.L1I = size - default: - if c.Cache.L1D < 0 { - c.Cache.L1I = size - } - if c.Cache.L1I < 0 { - c.Cache.L1I = size - } - } - case 2: - c.Cache.L2 = size - case 3: - c.Cache.L3 = size - } - } - } - - return -} - -type SGXEPCSection struct { - BaseAddress uint64 - EPCSize uint64 -} - -type SGXSupport struct { - Available bool - LaunchControl bool - SGX1Supported bool - SGX2Supported bool - MaxEnclaveSizeNot64 int64 - MaxEnclaveSize64 int64 - EPCSections []SGXEPCSection -} - -func hasSGX(available, lc bool) (rval SGXSupport) { - rval.Available = available - - if !available { - return - } - - rval.LaunchControl = lc - - a, _, _, d := cpuidex(0x12, 0) - rval.SGX1Supported = a&0x01 != 0 - rval.SGX2Supported = a&0x02 != 0 - rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 - rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 - rval.EPCSections = make([]SGXEPCSection, 0) - - for subleaf := uint32(2); subleaf < 2+8; subleaf++ { - eax, ebx, ecx, edx := cpuidex(0x12, subleaf) - leafType := eax & 0xf - - if leafType 
== 0 { - // Invalid subleaf, stop iterating - break - } else if leafType == 1 { - // EPC Section subleaf - baseAddress := uint64(eax&0xfffff000) + (uint64(ebx&0x000fffff) << 32) - size := uint64(ecx&0xfffff000) + (uint64(edx&0x000fffff) << 32) - - section := SGXEPCSection{BaseAddress: baseAddress, EPCSize: size} - rval.EPCSections = append(rval.EPCSections, section) - } - } - - return -} - -func support() Flags { - mfi := maxFunctionID() - vend, _ := vendorID() - if mfi < 0x1 { - return 0 - } - rval := uint64(0) - _, _, c, d := cpuid(1) - if (d & (1 << 15)) != 0 { - rval |= CMOV - } - if (d & (1 << 23)) != 0 { - rval |= MMX - } - if (d & (1 << 25)) != 0 { - rval |= MMXEXT - } - if (d & (1 << 25)) != 0 { - rval |= SSE - } - if (d & (1 << 26)) != 0 { - rval |= SSE2 - } - if (c & 1) != 0 { - rval |= SSE3 - } - if (c & (1 << 5)) != 0 { - rval |= VMX - } - if (c & 0x00000200) != 0 { - rval |= SSSE3 - } - if (c & 0x00080000) != 0 { - rval |= SSE4 - } - if (c & 0x00100000) != 0 { - rval |= SSE42 - } - if (c & (1 << 25)) != 0 { - rval |= AESNI - } - if (c & (1 << 1)) != 0 { - rval |= CLMUL - } - if c&(1<<23) != 0 { - rval |= POPCNT - } - if c&(1<<30) != 0 { - rval |= RDRAND - } - if c&(1<<29) != 0 { - rval |= F16C - } - if c&(1<<13) != 0 { - rval |= CX16 - } - if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { - if threadsPerCore() > 1 { - rval |= HTT - } - } - if vend == AMD && (d&(1<<28)) != 0 && mfi >= 4 { - if threadsPerCore() > 1 { - rval |= HTT - } - } - // Check XGETBV, OXSAVE and AVX bits - if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { - // Check for OS support - eax, _ := xgetbv(0) - if (eax & 0x6) == 0x6 { - rval |= AVX - if (c & 0x00001000) != 0 { - rval |= FMA3 - } - } - } - - // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. 
- if mfi >= 7 { - _, ebx, ecx, edx := cpuidex(7, 0) - eax1, _, _, _ := cpuidex(7, 1) - if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { - rval |= AVX2 - } - if (ebx & 0x00000008) != 0 { - rval |= BMI1 - if (ebx & 0x00000100) != 0 { - rval |= BMI2 - } - } - if ebx&(1<<2) != 0 { - rval |= SGX - } - if ebx&(1<<4) != 0 { - rval |= HLE - } - if ebx&(1<<9) != 0 { - rval |= ERMS - } - if ebx&(1<<11) != 0 { - rval |= RTM - } - if ebx&(1<<14) != 0 { - rval |= MPX - } - if ebx&(1<<18) != 0 { - rval |= RDSEED - } - if ebx&(1<<19) != 0 { - rval |= ADX - } - if ebx&(1<<29) != 0 { - rval |= SHA - } - if edx&(1<<26) != 0 { - rval |= IBPB - } - if ecx&(1<<30) != 0 { - rval |= SGXLC - } - if edx&(1<<27) != 0 { - rval |= STIBP - } - - // Only detect AVX-512 features if XGETBV is supported - if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { - // Check for OS support - eax, _ := xgetbv(0) - - // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and - // ZMM16-ZMM31 state are enabled by OS) - /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
- if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { - if ebx&(1<<16) != 0 { - rval |= AVX512F - } - if ebx&(1<<17) != 0 { - rval |= AVX512DQ - } - if ebx&(1<<21) != 0 { - rval |= AVX512IFMA - } - if ebx&(1<<26) != 0 { - rval |= AVX512PF - } - if ebx&(1<<27) != 0 { - rval |= AVX512ER - } - if ebx&(1<<28) != 0 { - rval |= AVX512CD - } - if ebx&(1<<30) != 0 { - rval |= AVX512BW - } - if ebx&(1<<31) != 0 { - rval |= AVX512VL - } - // ecx - if ecx&(1<<1) != 0 { - rval |= AVX512VBMI - } - if ecx&(1<<6) != 0 { - rval |= AVX512VBMI2 - } - if ecx&(1<<8) != 0 { - rval |= GFNI - } - if ecx&(1<<9) != 0 { - rval |= VAES - } - if ecx&(1<<10) != 0 { - rval |= VPCLMULQDQ - } - if ecx&(1<<11) != 0 { - rval |= AVX512VNNI - } - if ecx&(1<<12) != 0 { - rval |= AVX512BITALG - } - if ecx&(1<<14) != 0 { - rval |= AVX512VPOPCNTDQ - } - // edx - if edx&(1<<8) != 0 { - rval |= AVX512VP2INTERSECT - } - // cpuid eax 07h,ecx=1 - if eax1&(1<<5) != 0 { - rval |= AVX512BF16 - } - } - } - } - - if maxExtendedFunction() >= 0x80000001 { - _, _, c, d := cpuid(0x80000001) - if (c & (1 << 5)) != 0 { - rval |= LZCNT - rval |= POPCNT - } - if (d & (1 << 31)) != 0 { - rval |= AMD3DNOW - } - if (d & (1 << 30)) != 0 { - rval |= AMD3DNOWEXT - } - if (d & (1 << 23)) != 0 { - rval |= MMX - } - if (d & (1 << 22)) != 0 { - rval |= MMXEXT - } - if (c & (1 << 6)) != 0 { - rval |= SSE4A - } - if d&(1<<20) != 0 { - rval |= NX - } - if d&(1<<27) != 0 { - rval |= RDTSCP - } - - /* Allow for selectively disabling SSE2 functions on AMD processors - with SSE2 support but not SSE4a. This includes Athlon64, some - Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster - than SSE2 often enough to utilize this special-case flag. - AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case - so that SSE2 is used unless explicitly disabled by checking - AV_CPU_FLAG_SSE2SLOW. 
*/ - if vend != Intel && - rval&SSE2 != 0 && (c&0x00000040) == 0 { - rval |= SSE2SLOW - } - - /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be - * used unless the OS has AVX support. */ - if (rval & AVX) != 0 { - if (c & 0x00000800) != 0 { - rval |= XOP - } - if (c & 0x00010000) != 0 { - rval |= FMA4 - } - } - - if vend == Intel { - family, model := familyModel() - if family == 6 && (model == 9 || model == 13 || model == 14) { - /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and - * 6/14 (core1 "yonah") theoretically support sse2, but it's - * usually slower than mmx. */ - if (rval & SSE2) != 0 { - rval |= SSE2SLOW - } - if (rval & SSE3) != 0 { - rval |= SSE3SLOW - } - } - /* The Atom processor has SSSE3 support, which is useful in many cases, - * but sometimes the SSSE3 version is slower than the SSE2 equivalent - * on the Atom, but is generally faster on other processors supporting - * SSSE3. This flag allows for selectively disabling certain SSSE3 - * functions on the Atom. 
*/ - if family == 6 && model == 28 { - rval |= ATOM - } - } - } - return Flags(rval) -} - -func valAsString(values ...uint32) []byte { - r := make([]byte, 4*len(values)) - for i, v := range values { - dst := r[i*4:] - dst[0] = byte(v & 0xff) - dst[1] = byte((v >> 8) & 0xff) - dst[2] = byte((v >> 16) & 0xff) - dst[3] = byte((v >> 24) & 0xff) - switch { - case dst[0] == 0: - return r[:i*4] - case dst[1] == 0: - return r[:i*4+1] - case dst[2] == 0: - return r[:i*4+2] - case dst[3] == 0: - return r[:i*4+3] - } - } - return r -} - -// Single-precision and double-precision floating point -func (c CPUInfo) ArmFP() bool { - return c.Arm&FP != 0 -} - -// Advanced SIMD -func (c CPUInfo) ArmASIMD() bool { - return c.Arm&ASIMD != 0 -} - -// Generic timer -func (c CPUInfo) ArmEVTSTRM() bool { - return c.Arm&EVTSTRM != 0 -} - -// AES instructions -func (c CPUInfo) ArmAES() bool { - return c.Arm&AES != 0 -} - -// Polynomial Multiply instructions (PMULL/PMULL2) -func (c CPUInfo) ArmPMULL() bool { - return c.Arm&PMULL != 0 -} - -// SHA-1 instructions (SHA1C, etc) -func (c CPUInfo) ArmSHA1() bool { - return c.Arm&SHA1 != 0 -} - -// SHA-2 instructions (SHA256H, etc) -func (c CPUInfo) ArmSHA2() bool { - return c.Arm&SHA2 != 0 -} - -// CRC32/CRC32C instructions -func (c CPUInfo) ArmCRC32() bool { - return c.Arm&CRC32 != 0 -} - -// Large System Extensions (LSE) -func (c CPUInfo) ArmATOMICS() bool { - return c.Arm&ATOMICS != 0 -} - -// Half-precision floating point -func (c CPUInfo) ArmFPHP() bool { - return c.Arm&FPHP != 0 -} - -// Advanced SIMD half-precision floating point -func (c CPUInfo) ArmASIMDHP() bool { - return c.Arm&ASIMDHP != 0 -} - -// Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) -func (c CPUInfo) ArmASIMDRDM() bool { - return c.Arm&ASIMDRDM != 0 -} - -// Javascript-style double->int convert (FJCVTZS) -func (c CPUInfo) ArmJSCVT() bool { - return c.Arm&JSCVT != 0 -} - -// Floatin point complex number addition and multiplication -func (c CPUInfo) ArmFCMA() 
bool { - return c.Arm&FCMA != 0 -} - -// Weaker release consistency (LDAPR, etc) -func (c CPUInfo) ArmLRCPC() bool { - return c.Arm&LRCPC != 0 -} - -// Data cache clean to Point of Persistence (DC CVAP) -func (c CPUInfo) ArmDCPOP() bool { - return c.Arm&DCPOP != 0 -} - -// SHA-3 instructions (EOR3, RAXI, XAR, BCAX) -func (c CPUInfo) ArmSHA3() bool { - return c.Arm&SHA3 != 0 -} - -// SM3 instructions -func (c CPUInfo) ArmSM3() bool { - return c.Arm&SM3 != 0 -} - -// SM4 instructions -func (c CPUInfo) ArmSM4() bool { - return c.Arm&SM4 != 0 -} - -// SIMD Dot Product -func (c CPUInfo) ArmASIMDDP() bool { - return c.Arm&ASIMDDP != 0 -} - -// SHA512 instructions -func (c CPUInfo) ArmSHA512() bool { - return c.Arm&SHA512 != 0 -} - -// Scalable Vector Extension -func (c CPUInfo) ArmSVE() bool { - return c.Arm&SVE != 0 -} - -// Generic Pointer Authentication -func (c CPUInfo) ArmGPA() bool { - return c.Arm&GPA != 0 -} diff --git a/mantle/vendor/github.com/klauspost/cpuid/cpuid_386.s b/mantle/vendor/github.com/klauspost/cpuid/cpuid_386.s deleted file mode 100644 index 089638f5..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/cpuid_386.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -//+build 386,!gccgo,!noasm,!appengine - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORL CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+4(FP) - MOVL BX, ebx+8(FP) - MOVL CX, ecx+12(FP) - MOVL DX, edx+16(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+4(FP) - MOVL DX, edx+8(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/mantle/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/mantle/vendor/github.com/klauspost/cpuid/cpuid_amd64.s deleted file mode 100644 index 3ba0559e..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/cpuid_amd64.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -//+build amd64,!gccgo,!noasm,!appengine - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORQ CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmXgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+8(FP) - MOVL DX, edx+12(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/mantle/vendor/github.com/klauspost/cpuid/cpuid_arm64.s b/mantle/vendor/github.com/klauspost/cpuid/cpuid_arm64.s deleted file mode 100644 index 8975ee8d..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/cpuid_arm64.s +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -//+build arm64,!gccgo - -// See https://www.kernel.org/doc/Documentation/arm64/cpu-feature-registers.txt - -// func getMidr -TEXT ·getMidr(SB), 7, $0 - WORD $0xd5380000 // mrs x0, midr_el1 /* Main ID Register */ - MOVD R0, midr+0(FP) - RET - -// func getProcFeatures -TEXT ·getProcFeatures(SB), 7, $0 - WORD $0xd5380400 // mrs x0, id_aa64pfr0_el1 /* Processor Feature Register 0 */ - MOVD R0, procFeatures+0(FP) - RET - -// func getInstAttributes -TEXT ·getInstAttributes(SB), 7, $0 - WORD $0xd5380600 // mrs x0, id_aa64isar0_el1 /* Instruction Set Attribute Register 0 */ - WORD $0xd5380621 // mrs x1, id_aa64isar1_el1 /* Instruction Set Attribute Register 1 */ - MOVD R0, instAttrReg0+0(FP) - MOVD R1, instAttrReg1+8(FP) - RET - diff --git a/mantle/vendor/github.com/klauspost/cpuid/detect_arm64.go b/mantle/vendor/github.com/klauspost/cpuid/detect_arm64.go deleted file mode 100644 index 923a8261..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/detect_arm64.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -//+build arm64,!gccgo,!noasm,!appengine - -package cpuid - -func getMidr() (midr uint64) -func getProcFeatures() (procFeatures uint64) -func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) - -func initCPU() { - cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - xgetbv = func(uint32) (a, b uint32) { return 0, 0 } - rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } -} - -func addInfo(c *CPUInfo) { - // ARM64 disabled for now. 
- if true { - return - } - // midr := getMidr() - - // MIDR_EL1 - Main ID Register - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | Implementer | [31-24] | y | - // |--------------------------------------------------| - // | Variant | [23-20] | y | - // |--------------------------------------------------| - // | Architecture | [19-16] | y | - // |--------------------------------------------------| - // | PartNum | [15-4] | y | - // |--------------------------------------------------| - // | Revision | [3-0] | y | - // x--------------------------------------------------x - - // fmt.Printf(" implementer: 0x%02x\n", (midr>>24)&0xff) - // fmt.Printf(" variant: 0x%01x\n", (midr>>20)&0xf) - // fmt.Printf("architecture: 0x%01x\n", (midr>>16)&0xf) - // fmt.Printf(" part num: 0x%03x\n", (midr>>4)&0xfff) - // fmt.Printf(" revision: 0x%01x\n", (midr>>0)&0xf) - - procFeatures := getProcFeatures() - - // ID_AA64PFR0_EL1 - Processor Feature Register 0 - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | DIT | [51-48] | y | - // |--------------------------------------------------| - // | SVE | [35-32] | y | - // |--------------------------------------------------| - // | GIC | [27-24] | n | - // |--------------------------------------------------| - // | AdvSIMD | [23-20] | y | - // |--------------------------------------------------| - // | FP | [19-16] | y | - // |--------------------------------------------------| - // | EL3 | [15-12] | n | - // |--------------------------------------------------| - // | EL2 | [11-8] | n | - // |--------------------------------------------------| - // | EL1 | [7-4] | n | - // |--------------------------------------------------| - // | EL0 | [3-0] | n | - // x--------------------------------------------------x - - var f ArmFlags - // if 
procFeatures&(0xf<<48) != 0 { - // fmt.Println("DIT") - // } - if procFeatures&(0xf<<32) != 0 { - f |= SVE - } - if procFeatures&(0xf<<20) != 15<<20 { - f |= ASIMD - if procFeatures&(0xf<<20) == 1<<20 { - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 - // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. - f |= FPHP - f |= ASIMDHP - } - } - if procFeatures&(0xf<<16) != 0 { - f |= FP - } - - instAttrReg0, instAttrReg1 := getInstAttributes() - - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 - // - // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | TS | [55-52] | y | - // |--------------------------------------------------| - // | FHM | [51-48] | y | - // |--------------------------------------------------| - // | DP | [47-44] | y | - // |--------------------------------------------------| - // | SM4 | [43-40] | y | - // |--------------------------------------------------| - // | SM3 | [39-36] | y | - // |--------------------------------------------------| - // | SHA3 | [35-32] | y | - // |--------------------------------------------------| - // | RDM | [31-28] | y | - // |--------------------------------------------------| - // | ATOMICS | [23-20] | y | - // |--------------------------------------------------| - // | CRC32 | [19-16] | y | - // |--------------------------------------------------| - // | SHA2 | [15-12] | y | - // |--------------------------------------------------| - // | SHA1 | [11-8] | y | - // |--------------------------------------------------| - // | AES | [7-4] | y | - // x--------------------------------------------------x - - // if instAttrReg0&(0xf<<52) != 0 { - // fmt.Println("TS") - // } - // if instAttrReg0&(0xf<<48) != 0 { - // fmt.Println("FHM") - // } - 
if instAttrReg0&(0xf<<44) != 0 { - f |= ASIMDDP - } - if instAttrReg0&(0xf<<40) != 0 { - f |= SM4 - } - if instAttrReg0&(0xf<<36) != 0 { - f |= SM3 - } - if instAttrReg0&(0xf<<32) != 0 { - f |= SHA3 - } - if instAttrReg0&(0xf<<28) != 0 { - f |= ASIMDRDM - } - if instAttrReg0&(0xf<<20) != 0 { - f |= ATOMICS - } - if instAttrReg0&(0xf<<16) != 0 { - f |= CRC32 - } - if instAttrReg0&(0xf<<12) != 0 { - f |= SHA2 - } - if instAttrReg0&(0xf<<12) == 2<<12 { - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 - // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented. - f |= SHA512 - } - if instAttrReg0&(0xf<<8) != 0 { - f |= SHA1 - } - if instAttrReg0&(0xf<<4) != 0 { - f |= AES - } - if instAttrReg0&(0xf<<4) == 2<<4 { - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 - // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities. - f |= PMULL - } - - // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1 - // - // ID_AA64ISAR1_EL1 - Instruction set attribute register 1 - // x--------------------------------------------------x - // | Name | bits | visible | - // |--------------------------------------------------| - // | GPI | [31-28] | y | - // |--------------------------------------------------| - // | GPA | [27-24] | y | - // |--------------------------------------------------| - // | LRCPC | [23-20] | y | - // |--------------------------------------------------| - // | FCMA | [19-16] | y | - // |--------------------------------------------------| - // | JSCVT | [15-12] | y | - // |--------------------------------------------------| - // | API | [11-8] | y | - // |--------------------------------------------------| - // | APA | [7-4] | y | - // |--------------------------------------------------| - // | DPB | [3-0] | y | - // x--------------------------------------------------x - - // if 
instAttrReg1&(0xf<<28) != 0 { - // fmt.Println("GPI") - // } - if instAttrReg1&(0xf<<28) != 24 { - f |= GPA - } - if instAttrReg1&(0xf<<20) != 0 { - f |= LRCPC - } - if instAttrReg1&(0xf<<16) != 0 { - f |= FCMA - } - if instAttrReg1&(0xf<<12) != 0 { - f |= JSCVT - } - // if instAttrReg1&(0xf<<8) != 0 { - // fmt.Println("API") - // } - // if instAttrReg1&(0xf<<4) != 0 { - // fmt.Println("APA") - // } - if instAttrReg1&(0xf<<0) != 0 { - f |= DCPOP - } - c.Arm = f -} diff --git a/mantle/vendor/github.com/klauspost/cpuid/detect_intel.go b/mantle/vendor/github.com/klauspost/cpuid/detect_intel.go deleted file mode 100644 index 363951b3..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/detect_intel.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -//+build 386,!gccgo,!noasm amd64,!gccgo,!noasm,!appengine - -package cpuid - -func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func asmXgetbv(index uint32) (eax, edx uint32) -func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) - -func initCPU() { - cpuid = asmCpuid - cpuidex = asmCpuidex - xgetbv = asmXgetbv - rdtscpAsm = asmRdtscpAsm -} - -func addInfo(c *CPUInfo) { - c.maxFunc = maxFunctionID() - c.maxExFunc = maxExtendedFunction() - c.BrandName = brandName() - c.CacheLine = cacheLine() - c.Family, c.Model = familyModel() - c.Features = support() - c.SGX = hasSGX(c.Features&SGX != 0, c.Features&SGXLC != 0) - c.ThreadsPerCore = threadsPerCore() - c.LogicalCores = logicalCores() - c.PhysicalCores = physicalCores() - c.VendorID, c.VendorString = vendorID() - c.Hz = hertz(c.BrandName) - c.cacheSize() -} diff --git a/mantle/vendor/github.com/klauspost/cpuid/detect_ref.go b/mantle/vendor/github.com/klauspost/cpuid/detect_ref.go deleted file mode 100644 index 970ff3d2..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/detect_ref.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) 2015 Klaus 
Post, released under MIT License. See LICENSE file. - -//+build !amd64,!386,!arm64 gccgo noasm appengine - -package cpuid - -func initCPU() { - cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } - xgetbv = func(uint32) (a, b uint32) { return 0, 0 } - rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } -} - -func addInfo(info *CPUInfo) {} diff --git a/mantle/vendor/github.com/klauspost/cpuid/go.mod b/mantle/vendor/github.com/klauspost/cpuid/go.mod deleted file mode 100644 index 55563f2a..00000000 --- a/mantle/vendor/github.com/klauspost/cpuid/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/klauspost/cpuid - -go 1.12 diff --git a/mantle/vendor/github.com/minio/md5-simd/LICENSE b/mantle/vendor/github.com/minio/md5-simd/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/mantle/vendor/github.com/minio/md5-simd/README.md b/mantle/vendor/github.com/minio/md5-simd/README.md deleted file mode 100644 index 374214d1..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/README.md +++ /dev/null @@ -1,196 +0,0 @@ - -# md5-simd - -This is a SIMD accelerated MD5 package, allowing up to either 8 (AVX2) or 16 (AVX512) independent MD5 sums to be calculated on a single CPU core. - -It was originally based on the [md5vec](https://github.com/igneous-systems/md5vec) repository by Igneous Systems, but has been made more flexible by amongst others supporting different message sizes per lane and adding AVX512. - -`md5-simd` integrates a similar mechanism as described in [minio/sha256-simd](https://github.com/minio/sha256-simd#support-for-avx512) for making it easy for clients to take advantages of the parallel nature of the MD5 calculation. This will result in reduced overall CPU load. - -It is important to understand that `md5-simd` **does not speed up** a single threaded MD5 hash sum. -Rather it allows multiple __independent__ MD5 sums to be computed in parallel on the same CPU core, -thereby making more efficient usage of the computing resources. 
- -## Usage - -[![Documentation](https://godoc.org/github.com/minio/md5-simd?status.svg)](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc) - - -In order to use `md5-simd`, you must first create an `Server` which can be -used to instantiate one or more objects for MD5 hashing. - -These objects conform to the regular [`hash.Hash`](https://pkg.go.dev/hash?tab=doc#Hash) interface -and as such the normal Write/Reset/Sum functionality works as expected. - -As an example: -``` - // Create server - server := md5simd.NewServer() - defer server.Close() - - // Create hashing object (conforming to hash.Hash) - md5Hash := server.NewHash() - defer md5Hash.Close() - - // Write one (or more) blocks - md5Hash.Write(block) - - // Return digest - digest := md5Hash.Sum([]byte{}) -``` - -To keep performance both a [Server](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Server) -and individual [Hasher](https://pkg.go.dev/github.com/minio/md5-simd?tab=doc#Hasher) should -be closed using the `Close()` function when no longer needed. - -A Hasher can efficiently be re-used by using [`Reset()`](https://pkg.go.dev/hash?tab=doc#Hash) functionality. - -In case your system does not support the instructions required it will fall back to using `crypto/md5` for hashing. - -## Limitations - -As explained above `md5-simd` does not speed up an individual MD5 hash sum computation, -unless some hierarchical tree construct is used but this will result in different outcomes. -Running a single hash on a server results in approximately half the throughput. - -Instead, it allows running multiple MD5 calculations in parallel on a single CPU core. -This can be beneficial in e.g. multi-threaded server applications where many go-routines -are dealing with many requests and multiple MD5 calculations can be packed/scheduled for parallel execution on a single core. 
- -This will result in a lower overall CPU usage as compared to using the standard `crypto/md5` -functionality where each MD5 hash computation will consume a single thread (core). - -It is best to test and measure the overall CPU usage in a representative usage scenario in your application -to get an overall understanding of the benefits of `md5-simd` as compared to `crypto/md5`, ideally under heavy CPU load. - -Also note that `md5-simd` is best meant to work with large objects, -so if your application only hashes small objects of a few kilobytes -you may be better of by using `crypto/md5`. - -## Performance - -For the best performance writes should be a multiple of 64 bytes, ideally a multiple of 32KB. -To help with that a [`buffered := bufio.NewWriterSize(hasher, 32<<10)`](https://golang.org/pkg/bufio/#NewWriterSize) -can be inserted if you are unsure of the sizes of the writes. -Remember to [flush](https://golang.org/pkg/bufio/#Writer.Flush) `buffered` before reading the hash. - -A single 'server' can process 16 streams concurrently with 1 core (AVX-512) or 2 cores (AVX2). -In situations where it is likely that more than 16 streams are fully loaded it may be beneficial -to use multiple servers. 
- -The following chart compares the multi-core performance between `crypto/md5` vs the AVX2 vs the AVX512 code: - -![md5-performance-overview](chart/Multi-core-MD5-Aggregated-Hashing-Performance.png) - -Compared to `crypto/md5`, the AVX2 version is up to 4x faster: - -``` -$ benchcmp crypto-md5.txt avx2.txt -benchmark old MB/s new MB/s speedup -BenchmarkParallel/32KB-4 2229.22 7370.50 3.31x -BenchmarkParallel/64KB-4 2233.61 8248.46 3.69x -BenchmarkParallel/128KB-4 2235.43 8660.74 3.87x -BenchmarkParallel/256KB-4 2236.39 8863.87 3.96x -BenchmarkParallel/512KB-4 2238.05 8985.39 4.01x -BenchmarkParallel/1MB-4 2233.56 9042.62 4.05x -BenchmarkParallel/2MB-4 2224.11 9014.46 4.05x -BenchmarkParallel/4MB-4 2199.78 8993.61 4.09x -BenchmarkParallel/8MB-4 2182.48 8748.22 4.01x -``` - -Compared to `crypto/md5`, the AVX512 is up to 8x faster (for larger block sizes): - -``` -$ benchcmp crypto-md5.txt avx512.txt -benchmark old MB/s new MB/s speedup -BenchmarkParallel/32KB-4 2229.22 11605.78 5.21x -BenchmarkParallel/64KB-4 2233.61 14329.65 6.42x -BenchmarkParallel/128KB-4 2235.43 16166.39 7.23x -BenchmarkParallel/256KB-4 2236.39 15570.09 6.96x -BenchmarkParallel/512KB-4 2238.05 16705.83 7.46x -BenchmarkParallel/1MB-4 2233.56 16941.95 7.59x -BenchmarkParallel/2MB-4 2224.11 17136.01 7.70x -BenchmarkParallel/4MB-4 2199.78 17218.61 7.83x -BenchmarkParallel/8MB-4 2182.48 17252.88 7.91x -``` - -These measurements were performed on AWS EC2 instance of type `c5.xlarge` equipped with a Xeon Platinum 8124M CPU at 3.0 GHz. - - -## Operation - -To make operation as easy as possible there is a “Server” coordinating everything. The server keeps track of individual hash states and updates them as new data comes in. This can be visualized as follows: - -![server-architecture](chart/server-architecture.png) - -The data is sent to the server from each hash input in blocks of up to 32KB per round. In our testing we found this to be the block size that yielded the best results. 
- -Whenever there is data available the server will collect data for up to 16 hashes and process all 16 lanes in parallel. This means that if 16 hashes have data available all the lanes will be filled. However since that may not be the case, the server will fill less lanes and do a round anyway. Lanes can also be partially filled if less than 32KB of data is written. - -![server-lanes-example](chart/server-lanes-example.png) - -In this example 4 lanes are fully filled and 2 lanes are partially filled. In this case the black areas will simply be masked out from the results and ignored. This is also why calculating a single hash on a server will not result in any speedup and hash writes should be a multiple of 32KB for the best performance. - -For AVX512 all 16 calculations will be done on a single core, on AVX2 on 2 cores if there is data for more than 8 lanes. -So for optimal usage there should be data available for all 16 hashes. It may be perfectly reasonable to use more than 16 concurrent hashes. - - -## Design & Tech - -md5-simd has both an AVX2 (8-lane parallel), and an AVX512 (16-lane parallel version) algorithm to accelerate the computation with the following function definitions: -``` -//go:noescape -func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int) - -//go:noescape -func block16(state *uint32, ptrs *int64, mask uint64, n int) -``` - -The AVX2 version is based on the [md5vec](https://github.com/igneous-systems/md5vec) repository and is essentially unchanged except for minor (cosmetic) changes. - -The AVX512 version is derived from the AVX2 version but adds some further optimizations and simplifications. - -### Caching in upper ZMM registers - -The AVX2 version passes in a `cache8` block of memory (about 0.5 KB) for temporary storage of intermediate results during `ROUND1` which are subsequently used during `ROUND2` through to `ROUND4`. 
- -Since AVX512 has double the amount of registers (32 ZMM registers as compared to 16 YMM registers), it is possible to use the upper 16 ZMM registers for keeping the intermediate states on the CPU. As such, there is no need to pass in a corresponding `cache16` into the AVX512 block function. - -### Direct loading using 64-bit pointers - -The AVX2 uses the `VPGATHERDD` instruction (for YMM) to do a parallel load of 8 lanes using (8 independent) 32-bit offets. Since there is no control over how the 8 slices that are passed into the (Golang) `blockMd5` function are laid out into memory, it is not possible to derive a "base" address and corresponding offsets (all within 32-bits) for all 8 slices. - -As such the AVX2 version uses an interim buffer to collect the byte slices to be hashed from all 8 inut slices and passed this buffer along with (fixed) 32-bit offsets into the assembly code. - -For the AVX512 version this interim buffer is not needed since the AVX512 code uses a pair of `VPGATHERQD` instructions to directly dereference 64-bit pointers (from a base register address that is initialized to zero). - -Note that two load (gather) instructions are needed because the AVX512 version processes 16-lanes in parallel, requiring 16 times 64-bit = 1024 bits in total to be loaded. A simple `VALIGND` and `VPORD` are subsequently used to merge the lower and upper halves together into a single ZMM register (that contains 16 lanes of 32-bit DWORDS). - -### Masking support - -Due to the fact that pointers are passed directly from the Golang slices, we need to protect against NULL pointers. -For this a 16-bit mask is passed in the AVX512 assembly code which is used during the `VPGATHERQD` instructions to mask out lanes that could otherwise result in segment violations. - -### Minor optimizations - -The `roll` macro (three instructions on AVX2) is no longer needed for AVX512 and is replaced by a single `VPROLD` instruction. 
- -Also several logical operations from the various ROUNDS of the AVX2 version could be combined into a single instruction using ternary logic (with the `VPTERMLOGD` instruction), resulting in a further simplification and speed-up. - -## Low level block function performance - -The benchmark below shows the (single thread) maximum performance of the `block()` function for AVX2 (having 8 lanes) and AVX512 (having 16 lanes). Also the baseline single-core performance from the standard `crypto/md5` package is shown for comparison. - -``` -BenchmarkCryptoMd5-4 687.66 MB/s 0 B/op 0 allocs/op -BenchmarkBlock8-4 4144.80 MB/s 0 B/op 0 allocs/op -BenchmarkBlock16-4 8228.88 MB/s 0 B/op 0 allocs/op -``` - -## License - -`md5-simd` is released under the Apache License v2.0. You can find the complete text in the file LICENSE. - -## Contributing - -Contributions are welcome, please send PRs for any enhancements. \ No newline at end of file diff --git a/mantle/vendor/github.com/minio/md5-simd/block-generic.go b/mantle/vendor/github.com/minio/md5-simd/block-generic.go deleted file mode 100644 index eb333b93..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/block-generic.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by go run gen.go -output md5block.go; DO NOT EDIT. 
- -package md5simd - -import ( - "encoding/binary" - "math/bits" -) - -type digest struct { - s [4]uint32 - x [BlockSize]byte - nx int - len uint64 -} - -func blockGeneric(dig *digest, p []byte) { - // load state - a, b, c, d := dig.s[0], dig.s[1], dig.s[2], dig.s[3] - - for i := 0; i <= len(p)-BlockSize; i += BlockSize { - // eliminate bounds checks on p - q := p[i:] - q = q[:BlockSize:BlockSize] - - // save current state - aa, bb, cc, dd := a, b, c, d - - // load input block - x0 := binary.LittleEndian.Uint32(q[4*0x0:]) - x1 := binary.LittleEndian.Uint32(q[4*0x1:]) - x2 := binary.LittleEndian.Uint32(q[4*0x2:]) - x3 := binary.LittleEndian.Uint32(q[4*0x3:]) - x4 := binary.LittleEndian.Uint32(q[4*0x4:]) - x5 := binary.LittleEndian.Uint32(q[4*0x5:]) - x6 := binary.LittleEndian.Uint32(q[4*0x6:]) - x7 := binary.LittleEndian.Uint32(q[4*0x7:]) - x8 := binary.LittleEndian.Uint32(q[4*0x8:]) - x9 := binary.LittleEndian.Uint32(q[4*0x9:]) - xa := binary.LittleEndian.Uint32(q[4*0xa:]) - xb := binary.LittleEndian.Uint32(q[4*0xb:]) - xc := binary.LittleEndian.Uint32(q[4*0xc:]) - xd := binary.LittleEndian.Uint32(q[4*0xd:]) - xe := binary.LittleEndian.Uint32(q[4*0xe:]) - xf := binary.LittleEndian.Uint32(q[4*0xf:]) - - // round 1 - a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x0+0xd76aa478, 7) - d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x1+0xe8c7b756, 12) - c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x2+0x242070db, 17) - b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x3+0xc1bdceee, 22) - a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x4+0xf57c0faf, 7) - d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x5+0x4787c62a, 12) - c = d + bits.RotateLeft32((((a^b)&d)^b)+c+x6+0xa8304613, 17) - b = c + bits.RotateLeft32((((d^a)&c)^a)+b+x7+0xfd469501, 22) - a = b + bits.RotateLeft32((((c^d)&b)^d)+a+x8+0x698098d8, 7) - d = a + bits.RotateLeft32((((b^c)&a)^c)+d+x9+0x8b44f7af, 12) - c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xa+0xffff5bb1, 17) - b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xb+0x895cd7be, 22) - a = b + 
bits.RotateLeft32((((c^d)&b)^d)+a+xc+0x6b901122, 7) - d = a + bits.RotateLeft32((((b^c)&a)^c)+d+xd+0xfd987193, 12) - c = d + bits.RotateLeft32((((a^b)&d)^b)+c+xe+0xa679438e, 17) - b = c + bits.RotateLeft32((((d^a)&c)^a)+b+xf+0x49b40821, 22) - - // round 2 - a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x1+0xf61e2562, 5) - d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x6+0xc040b340, 9) - c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xb+0x265e5a51, 14) - b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x0+0xe9b6c7aa, 20) - a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x5+0xd62f105d, 5) - d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xa+0x02441453, 9) - c = d + bits.RotateLeft32((((d^a)&b)^a)+c+xf+0xd8a1e681, 14) - b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x4+0xe7d3fbc8, 20) - a = b + bits.RotateLeft32((((b^c)&d)^c)+a+x9+0x21e1cde6, 5) - d = a + bits.RotateLeft32((((a^b)&c)^b)+d+xe+0xc33707d6, 9) - c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x3+0xf4d50d87, 14) - b = c + bits.RotateLeft32((((c^d)&a)^d)+b+x8+0x455a14ed, 20) - a = b + bits.RotateLeft32((((b^c)&d)^c)+a+xd+0xa9e3e905, 5) - d = a + bits.RotateLeft32((((a^b)&c)^b)+d+x2+0xfcefa3f8, 9) - c = d + bits.RotateLeft32((((d^a)&b)^a)+c+x7+0x676f02d9, 14) - b = c + bits.RotateLeft32((((c^d)&a)^d)+b+xc+0x8d2a4c8a, 20) - - // round 3 - a = b + bits.RotateLeft32((b^c^d)+a+x5+0xfffa3942, 4) - d = a + bits.RotateLeft32((a^b^c)+d+x8+0x8771f681, 11) - c = d + bits.RotateLeft32((d^a^b)+c+xb+0x6d9d6122, 16) - b = c + bits.RotateLeft32((c^d^a)+b+xe+0xfde5380c, 23) - a = b + bits.RotateLeft32((b^c^d)+a+x1+0xa4beea44, 4) - d = a + bits.RotateLeft32((a^b^c)+d+x4+0x4bdecfa9, 11) - c = d + bits.RotateLeft32((d^a^b)+c+x7+0xf6bb4b60, 16) - b = c + bits.RotateLeft32((c^d^a)+b+xa+0xbebfbc70, 23) - a = b + bits.RotateLeft32((b^c^d)+a+xd+0x289b7ec6, 4) - d = a + bits.RotateLeft32((a^b^c)+d+x0+0xeaa127fa, 11) - c = d + bits.RotateLeft32((d^a^b)+c+x3+0xd4ef3085, 16) - b = c + bits.RotateLeft32((c^d^a)+b+x6+0x04881d05, 23) - a = b + 
bits.RotateLeft32((b^c^d)+a+x9+0xd9d4d039, 4) - d = a + bits.RotateLeft32((a^b^c)+d+xc+0xe6db99e5, 11) - c = d + bits.RotateLeft32((d^a^b)+c+xf+0x1fa27cf8, 16) - b = c + bits.RotateLeft32((c^d^a)+b+x2+0xc4ac5665, 23) - - // round 4 - a = b + bits.RotateLeft32((c^(b|^d))+a+x0+0xf4292244, 6) - d = a + bits.RotateLeft32((b^(a|^c))+d+x7+0x432aff97, 10) - c = d + bits.RotateLeft32((a^(d|^b))+c+xe+0xab9423a7, 15) - b = c + bits.RotateLeft32((d^(c|^a))+b+x5+0xfc93a039, 21) - a = b + bits.RotateLeft32((c^(b|^d))+a+xc+0x655b59c3, 6) - d = a + bits.RotateLeft32((b^(a|^c))+d+x3+0x8f0ccc92, 10) - c = d + bits.RotateLeft32((a^(d|^b))+c+xa+0xffeff47d, 15) - b = c + bits.RotateLeft32((d^(c|^a))+b+x1+0x85845dd1, 21) - a = b + bits.RotateLeft32((c^(b|^d))+a+x8+0x6fa87e4f, 6) - d = a + bits.RotateLeft32((b^(a|^c))+d+xf+0xfe2ce6e0, 10) - c = d + bits.RotateLeft32((a^(d|^b))+c+x6+0xa3014314, 15) - b = c + bits.RotateLeft32((d^(c|^a))+b+xd+0x4e0811a1, 21) - a = b + bits.RotateLeft32((c^(b|^d))+a+x4+0xf7537e82, 6) - d = a + bits.RotateLeft32((b^(a|^c))+d+xb+0xbd3af235, 10) - c = d + bits.RotateLeft32((a^(d|^b))+c+x2+0x2ad7d2bb, 15) - b = c + bits.RotateLeft32((d^(c|^a))+b+x9+0xeb86d391, 21) - - // add saved state - a += aa - b += bb - c += cc - d += dd - } - - // save state - dig.s[0], dig.s[1], dig.s[2], dig.s[3] = a, b, c, d -} diff --git a/mantle/vendor/github.com/minio/md5-simd/block16_amd64.s b/mantle/vendor/github.com/minio/md5-simd/block16_amd64.s deleted file mode 100644 index d32c1220..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/block16_amd64.s +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2020 MinIO Inc. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. 
- -// This is the AVX512 implementation of the MD5 block function (16-way parallel) - -#define prep(index) \ - KMOVQ kmask, ktmp \ - VPGATHERDD index*4(base)(ptrs*1), ktmp, mem - -#define ROUND1(a, b, c, d, index, const, shift) \ - VXORPS c, tmp, tmp \ - VPADDD 64*const(consts), a, a \ - VPADDD mem, a, a \ - VPTERNLOGD $0x6C, b, d, tmp \ - prep(index) \ - VPADDD tmp, a, a \ - VPROLD $shift, a, a \ - VMOVAPD c, tmp \ - VPADDD b, a, a - -#define ROUND1noload(a, b, c, d, const, shift) \ - VXORPS c, tmp, tmp \ - VPADDD 64*const(consts), a, a \ - VPADDD mem, a, a \ - VPTERNLOGD $0x6C, b, d, tmp \ - VPADDD tmp, a, a \ - VPROLD $shift, a, a \ - VMOVAPD c, tmp \ - VPADDD b, a, a - -#define ROUND2(a, b, c, d, zreg, const, shift) \ - VPADDD 64*const(consts), a, a \ - VPADDD zreg, a, a \ - VANDNPS c, tmp, tmp \ - VPTERNLOGD $0xEC, b, tmp, tmp2 \ - VMOVAPD c, tmp \ - VPADDD tmp2, a, a \ - VMOVAPD c, tmp2 \ - VPROLD $shift, a, a \ - VPADDD b, a, a - -#define ROUND3(a, b, c, d, zreg, const, shift) \ - VPADDD 64*const(consts), a, a \ - VPADDD zreg, a, a \ - VPTERNLOGD $0x96, b, d, tmp \ - VPADDD tmp, a, a \ - VPROLD $shift, a, a \ - VMOVAPD b, tmp \ - VPADDD b, a, a - -#define ROUND4(a, b, c, d, zreg, const, shift) \ - VPADDD 64*const(consts), a, a \ - VPADDD zreg, a, a \ - VPTERNLOGD $0x36, b, c, tmp \ - VPADDD tmp, a, a \ - VPROLD $shift, a, a \ - VXORPS c, ones, tmp \ - VPADDD b, a, a - -TEXT ·block16(SB),4,$0-40 - - MOVQ state+0(FP), BX - MOVQ base+8(FP), SI - MOVQ ptrs+16(FP), AX - KMOVQ mask+24(FP), K1 - MOVQ n+32(FP), DX - MOVQ ·avx512md5consts+0(SB), DI - -#define a Z0 -#define b Z1 -#define c Z2 -#define d Z3 - -#define sa Z4 -#define sb Z5 -#define sc Z6 -#define sd Z7 - -#define tmp Z8 -#define tmp2 Z9 -#define ptrs Z10 -#define ones Z12 -#define mem Z15 - -#define kmask K1 -#define ktmp K3 - -// ---------------------------------------------------------- -// Registers Z16 through to Z31 are used for caching purposes -// 
---------------------------------------------------------- - - -#define dig BX -#define count DX -#define base SI -#define consts DI - - // load digest into state registers - VMOVUPD (dig), a - VMOVUPD 0x40(dig), b - VMOVUPD 0x80(dig), c - VMOVUPD 0xc0(dig), d - - // load source pointers - VMOVUPD 0x00(AX), ptrs - - MOVQ $-1, AX - VPBROADCASTQ AX, ones - -loop: - VMOVAPD a, sa - VMOVAPD b, sb - VMOVAPD c, sc - VMOVAPD d, sd - - prep(0) - VMOVAPD d, tmp - VMOVAPD mem, Z16 - - ROUND1(a,b,c,d, 1,0x00, 7) - VMOVAPD mem, Z17 - ROUND1(d,a,b,c, 2,0x01,12) - VMOVAPD mem, Z18 - ROUND1(c,d,a,b, 3,0x02,17) - VMOVAPD mem, Z19 - ROUND1(b,c,d,a, 4,0x03,22) - VMOVAPD mem, Z20 - ROUND1(a,b,c,d, 5,0x04, 7) - VMOVAPD mem, Z21 - ROUND1(d,a,b,c, 6,0x05,12) - VMOVAPD mem, Z22 - ROUND1(c,d,a,b, 7,0x06,17) - VMOVAPD mem, Z23 - ROUND1(b,c,d,a, 8,0x07,22) - VMOVAPD mem, Z24 - ROUND1(a,b,c,d, 9,0x08, 7) - VMOVAPD mem, Z25 - ROUND1(d,a,b,c,10,0x09,12) - VMOVAPD mem, Z26 - ROUND1(c,d,a,b,11,0x0a,17) - VMOVAPD mem, Z27 - ROUND1(b,c,d,a,12,0x0b,22) - VMOVAPD mem, Z28 - ROUND1(a,b,c,d,13,0x0c, 7) - VMOVAPD mem, Z29 - ROUND1(d,a,b,c,14,0x0d,12) - VMOVAPD mem, Z30 - ROUND1(c,d,a,b,15,0x0e,17) - VMOVAPD mem, Z31 - - ROUND1noload(b,c,d,a, 0x0f,22) - - VMOVAPD d, tmp - VMOVAPD d, tmp2 - - ROUND2(a,b,c,d, Z17,0x10, 5) - ROUND2(d,a,b,c, Z22,0x11, 9) - ROUND2(c,d,a,b, Z27,0x12,14) - ROUND2(b,c,d,a, Z16,0x13,20) - ROUND2(a,b,c,d, Z21,0x14, 5) - ROUND2(d,a,b,c, Z26,0x15, 9) - ROUND2(c,d,a,b, Z31,0x16,14) - ROUND2(b,c,d,a, Z20,0x17,20) - ROUND2(a,b,c,d, Z25,0x18, 5) - ROUND2(d,a,b,c, Z30,0x19, 9) - ROUND2(c,d,a,b, Z19,0x1a,14) - ROUND2(b,c,d,a, Z24,0x1b,20) - ROUND2(a,b,c,d, Z29,0x1c, 5) - ROUND2(d,a,b,c, Z18,0x1d, 9) - ROUND2(c,d,a,b, Z23,0x1e,14) - ROUND2(b,c,d,a, Z28,0x1f,20) - - VMOVAPD c, tmp - - ROUND3(a,b,c,d, Z21,0x20, 4) - ROUND3(d,a,b,c, Z24,0x21,11) - ROUND3(c,d,a,b, Z27,0x22,16) - ROUND3(b,c,d,a, Z30,0x23,23) - ROUND3(a,b,c,d, Z17,0x24, 4) - ROUND3(d,a,b,c, Z20,0x25,11) - ROUND3(c,d,a,b, 
Z23,0x26,16) - ROUND3(b,c,d,a, Z26,0x27,23) - ROUND3(a,b,c,d, Z29,0x28, 4) - ROUND3(d,a,b,c, Z16,0x29,11) - ROUND3(c,d,a,b, Z19,0x2a,16) - ROUND3(b,c,d,a, Z22,0x2b,23) - ROUND3(a,b,c,d, Z25,0x2c, 4) - ROUND3(d,a,b,c, Z28,0x2d,11) - ROUND3(c,d,a,b, Z31,0x2e,16) - ROUND3(b,c,d,a, Z18,0x2f,23) - - VXORPS d, ones, tmp - - ROUND4(a,b,c,d, Z16,0x30, 6) - ROUND4(d,a,b,c, Z23,0x31,10) - ROUND4(c,d,a,b, Z30,0x32,15) - ROUND4(b,c,d,a, Z21,0x33,21) - ROUND4(a,b,c,d, Z28,0x34, 6) - ROUND4(d,a,b,c, Z19,0x35,10) - ROUND4(c,d,a,b, Z26,0x36,15) - ROUND4(b,c,d,a, Z17,0x37,21) - ROUND4(a,b,c,d, Z24,0x38, 6) - ROUND4(d,a,b,c, Z31,0x39,10) - ROUND4(c,d,a,b, Z22,0x3a,15) - ROUND4(b,c,d,a, Z29,0x3b,21) - ROUND4(a,b,c,d, Z20,0x3c, 6) - ROUND4(d,a,b,c, Z27,0x3d,10) - ROUND4(c,d,a,b, Z18,0x3e,15) - ROUND4(b,c,d,a, Z25,0x3f,21) - - VPADDD sa, a, a - VPADDD sb, b, b - VPADDD sc, c, c - VPADDD sd, d, d - - LEAQ 64(base), base - SUBQ $64, count - JNE loop - - VMOVUPD a, (dig) - VMOVUPD b, 0x40(dig) - VMOVUPD c, 0x80(dig) - VMOVUPD d, 0xc0(dig) - - VZEROUPPER - RET diff --git a/mantle/vendor/github.com/minio/md5-simd/block8_amd64.s b/mantle/vendor/github.com/minio/md5-simd/block8_amd64.s deleted file mode 100644 index f5f1d9ca..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/block8_amd64.s +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright (c) 2018 Igneous Systems -// MIT License -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// Copyright (c) 2020 MinIO Inc. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -// This is the AVX2 implementation of the MD5 block function (8-way parallel) - -// block8(state *uint64, base uintptr, bufs *int32, cache *byte, n int) -TEXT ·block8(SB), 4, $0-40 - MOVQ state+0(FP), BX - MOVQ base+8(FP), SI - MOVQ bufs+16(FP), AX - MOVQ cache+24(FP), CX - MOVQ n+32(FP), DX - MOVQ ·avx256md5consts+0(SB), DI - - // Align cache (which is stack allocated by the compiler) - // to a 256 bit boundary (ymm register alignment) - // The cache8 type is deliberately oversized to permit this. 
- ADDQ $31, CX - ANDB $-32, CL - -#define a Y0 -#define b Y1 -#define c Y2 -#define d Y3 - -#define sa Y4 -#define sb Y5 -#define sc Y6 -#define sd Y7 - -#define tmp Y8 -#define tmp2 Y9 - -#define mask Y10 -#define off Y11 - -#define ones Y12 - -#define rtmp1 Y13 -#define rtmp2 Y14 - -#define mem Y15 - -#define dig BX -#define cache CX -#define count DX -#define base SI -#define consts DI - -#define prepmask \ - VXORPS mask, mask, mask \ - VPCMPGTD mask, off, mask - -#define prep(index) \ - VMOVAPD mask, rtmp2 \ - VPGATHERDD rtmp2, index*4(base)(off*1), mem - -#define load(index) \ - VMOVAPD index*32(cache), mem - -#define store(index) \ - VMOVAPD mem, index*32(cache) - -#define roll(shift, a) \ - VPSLLD $shift, a, rtmp1 \ - VPSRLD $32-shift, a, a \ - VORPS rtmp1, a, a - -#define ROUND1(a, b, c, d, index, const, shift) \ - VXORPS c, tmp, tmp \ - VPADDD 32*const(consts), a, a \ - VPADDD mem, a, a \ - VANDPS b, tmp, tmp \ - VXORPS d, tmp, tmp \ - prep(index) \ - VPADDD tmp, a, a \ - roll(shift,a) \ - VMOVAPD c, tmp \ - VPADDD b, a, a - -#define ROUND1load(a, b, c, d, index, const, shift) \ - VXORPS c, tmp, tmp \ - VPADDD 32*const(consts), a, a \ - VPADDD mem, a, a \ - VANDPS b, tmp, tmp \ - VXORPS d, tmp, tmp \ - load(index) \ - VPADDD tmp, a, a \ - roll(shift,a) \ - VMOVAPD c, tmp \ - VPADDD b, a, a - -#define ROUND2(a, b, c, d, index, const, shift) \ - VPADDD 32*const(consts), a, a \ - VPADDD mem, a, a \ - VANDPS b, tmp2, tmp2 \ - VANDNPS c, tmp, tmp \ - load(index) \ - VORPS tmp, tmp2, tmp2 \ - VMOVAPD c, tmp \ - VPADDD tmp2, a, a \ - VMOVAPD c, tmp2 \ - roll(shift,a) \ - VPADDD b, a, a - -#define ROUND3(a, b, c, d, index, const, shift) \ - VPADDD 32*const(consts), a, a \ - VPADDD mem, a, a \ - load(index) \ - VXORPS d, tmp, tmp \ - VXORPS b, tmp, tmp \ - VPADDD tmp, a, a \ - roll(shift,a) \ - VMOVAPD b, tmp \ - VPADDD b, a, a - -#define ROUND4(a, b, c, d, index, const, shift) \ - VPADDD 32*const(consts), a, a \ - VPADDD mem, a, a \ - VORPS b, tmp, tmp \ - VXORPS 
c, tmp, tmp \ - VPADDD tmp, a, a \ - load(index) \ - roll(shift,a) \ - VXORPS c, ones, tmp \ - VPADDD b, a, a - - // load digest into state registers - VMOVUPD (dig), a - VMOVUPD 32(dig), b - VMOVUPD 64(dig), c - VMOVUPD 96(dig), d - - // load source buffer offsets - VMOVUPD (AX), off - - prepmask - VPCMPEQD ones, ones, ones - -loop: - VMOVAPD a, sa - VMOVAPD b, sb - VMOVAPD c, sc - VMOVAPD d, sd - - prep(0) - VMOVAPD d, tmp - store(0) - - ROUND1(a,b,c,d, 1,0x00, 7) - store(1) - ROUND1(d,a,b,c, 2,0x01,12) - store(2) - ROUND1(c,d,a,b, 3,0x02,17) - store(3) - ROUND1(b,c,d,a, 4,0x03,22) - store(4) - ROUND1(a,b,c,d, 5,0x04, 7) - store(5) - ROUND1(d,a,b,c, 6,0x05,12) - store(6) - ROUND1(c,d,a,b, 7,0x06,17) - store(7) - ROUND1(b,c,d,a, 8,0x07,22) - store(8) - ROUND1(a,b,c,d, 9,0x08, 7) - store(9) - ROUND1(d,a,b,c,10,0x09,12) - store(10) - ROUND1(c,d,a,b,11,0x0a,17) - store(11) - ROUND1(b,c,d,a,12,0x0b,22) - store(12) - ROUND1(a,b,c,d,13,0x0c, 7) - store(13) - ROUND1(d,a,b,c,14,0x0d,12) - store(14) - ROUND1(c,d,a,b,15,0x0e,17) - store(15) - ROUND1load(b,c,d,a, 1,0x0f,22) - - VMOVAPD d, tmp - VMOVAPD d, tmp2 - - ROUND2(a,b,c,d, 6,0x10, 5) - ROUND2(d,a,b,c,11,0x11, 9) - ROUND2(c,d,a,b, 0,0x12,14) - ROUND2(b,c,d,a, 5,0x13,20) - ROUND2(a,b,c,d,10,0x14, 5) - ROUND2(d,a,b,c,15,0x15, 9) - ROUND2(c,d,a,b, 4,0x16,14) - ROUND2(b,c,d,a, 9,0x17,20) - ROUND2(a,b,c,d,14,0x18, 5) - ROUND2(d,a,b,c, 3,0x19, 9) - ROUND2(c,d,a,b, 8,0x1a,14) - ROUND2(b,c,d,a,13,0x1b,20) - ROUND2(a,b,c,d, 2,0x1c, 5) - ROUND2(d,a,b,c, 7,0x1d, 9) - ROUND2(c,d,a,b,12,0x1e,14) - ROUND2(b,c,d,a, 0,0x1f,20) - - load(5) - VMOVAPD c, tmp - - ROUND3(a,b,c,d, 8,0x20, 4) - ROUND3(d,a,b,c,11,0x21,11) - ROUND3(c,d,a,b,14,0x22,16) - ROUND3(b,c,d,a, 1,0x23,23) - ROUND3(a,b,c,d, 4,0x24, 4) - ROUND3(d,a,b,c, 7,0x25,11) - ROUND3(c,d,a,b,10,0x26,16) - ROUND3(b,c,d,a,13,0x27,23) - ROUND3(a,b,c,d, 0,0x28, 4) - ROUND3(d,a,b,c, 3,0x29,11) - ROUND3(c,d,a,b, 6,0x2a,16) - ROUND3(b,c,d,a, 9,0x2b,23) - ROUND3(a,b,c,d,12,0x2c, 4) - 
ROUND3(d,a,b,c,15,0x2d,11) - ROUND3(c,d,a,b, 2,0x2e,16) - ROUND3(b,c,d,a, 0,0x2f,23) - - load(0) - VXORPS d, ones, tmp - - ROUND4(a,b,c,d, 7,0x30, 6) - ROUND4(d,a,b,c,14,0x31,10) - ROUND4(c,d,a,b, 5,0x32,15) - ROUND4(b,c,d,a,12,0x33,21) - ROUND4(a,b,c,d, 3,0x34, 6) - ROUND4(d,a,b,c,10,0x35,10) - ROUND4(c,d,a,b, 1,0x36,15) - ROUND4(b,c,d,a, 8,0x37,21) - ROUND4(a,b,c,d,15,0x38, 6) - ROUND4(d,a,b,c, 6,0x39,10) - ROUND4(c,d,a,b,13,0x3a,15) - ROUND4(b,c,d,a, 4,0x3b,21) - ROUND4(a,b,c,d,11,0x3c, 6) - ROUND4(d,a,b,c, 2,0x3d,10) - ROUND4(c,d,a,b, 9,0x3e,15) - ROUND4(b,c,d,a, 0,0x3f,21) - - VPADDD sa, a, a - VPADDD sb, b, b - VPADDD sc, c, c - VPADDD sd, d, d - - LEAQ 64(base), base - SUBQ $64, count - JNE loop - - VMOVUPD a, (dig) - VMOVUPD b, 32(dig) - VMOVUPD c, 64(dig) - VMOVUPD d, 96(dig) - - VZEROUPPER - RET diff --git a/mantle/vendor/github.com/minio/md5-simd/block_amd64.go b/mantle/vendor/github.com/minio/md5-simd/block_amd64.go deleted file mode 100644 index 27d6ce00..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/block_amd64.go +++ /dev/null @@ -1,199 +0,0 @@ -//+build !noasm,!appengine,gc - -// Copyright (c) 2020 MinIO Inc. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -package md5simd - -import ( - "fmt" - "math" - "sync" - "unsafe" - - "github.com/klauspost/cpuid" -) - -var hasAVX512 bool - -//go:noescape -func block8(state *uint32, base uintptr, bufs *int32, cache *byte, n int) - -//go:noescape -func block16(state *uint32, base uintptr, ptrs *int32, mask uint64, n int) - -// 8-way 4x uint32 digests in 4 ymm registers -// (ymm0, ymm1, ymm2, ymm3) -type digest8 struct { - v0, v1, v2, v3 [8]uint32 -} - -// Stack cache for 8x64 byte md5.BlockSize bytes. -// Must be 32-byte aligned, so allocate 512+32 and -// align upwards at runtime. -type cache8 [512 + 32]byte - -// MD5 magic numbers for one lane of hashing; inflated -// 8x below at init time. 
-var md5consts = [64]uint32{ - 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, - 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, - 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, - 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, - 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, - 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, - 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, - 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, - 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, - 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, - 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, - 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, - 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, - 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1, - 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, - 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391, -} - -// inflate the consts 8-way for 8x md5 (256 bit ymm registers) -var avx256md5consts = func(c []uint32) []uint32 { - inf := make([]uint32, 8*len(c)) - for i := range c { - for j := 0; j < 8; j++ { - inf[(i*8)+j] = c[i] - } - } - return inf -}(md5consts[:]) - -// 16-way 4x uint32 digests in 4 zmm registers -type digest16 struct { - v0, v1, v2, v3 [16]uint32 -} - -// inflate the consts 16-way for 16x md5 (512 bit zmm registers) -var avx512md5consts = func(c []uint32) []uint32 { - inf := make([]uint32, 16*len(c)) - for i := range c { - for j := 0; j < 16; j++ { - inf[(i*16)+j] = c[i] - } - } - return inf -}(md5consts[:]) - -func init() { - hasAVX512 = cpuid.CPU.AVX512F() -} - -// Interface function to assembly code -func (s *md5Server) blockMd5_x16(d *digest16, input [16][]byte, half bool) { - if hasAVX512 { - blockMd5_avx512(d, input, s.allBufs, &s.maskRounds16) - } else { - d8a, d8b := digest8{}, digest8{} - for i := range d8a.v0 { - j := i + 8 - d8a.v0[i], d8a.v1[i], d8a.v2[i], d8a.v3[i] = d.v0[i], d.v1[i], d.v2[i], d.v3[i] - if !half { - d8b.v0[i], d8b.v1[i], d8b.v2[i], d8b.v3[i] = d.v0[j], d.v1[j], d.v2[j], d.v3[j] - } - } - - i8 := 
[2][8][]byte{} - for i := range i8[0] { - i8[0][i], i8[1][i] = input[i], input[8+i] - } - if half { - blockMd5_avx2(&d8a, i8[0], s.allBufs, &s.maskRounds8a) - } else { - wg := sync.WaitGroup{} - wg.Add(2) - go func() { blockMd5_avx2(&d8a, i8[0], s.allBufs, &s.maskRounds8a); wg.Done() }() - go func() { blockMd5_avx2(&d8b, i8[1], s.allBufs, &s.maskRounds8b); wg.Done() }() - wg.Wait() - } - - for i := range d8a.v0 { - j := i + 8 - d.v0[i], d.v1[i], d.v2[i], d.v3[i] = d8a.v0[i], d8a.v1[i], d8a.v2[i], d8a.v3[i] - if !half { - d.v0[j], d.v1[j], d.v2[j], d.v3[j] = d8b.v0[i], d8b.v1[i], d8b.v2[i], d8b.v3[i] - } - } - } -} - -// Interface function to AVX512 assembly code -func blockMd5_avx512(s *digest16, input [16][]byte, base []byte, maskRounds *[16]maskRounds) { - baseMin := uint64(uintptr(unsafe.Pointer(&(base[0])))) - ptrs := [16]int32{} - - for i := range ptrs { - if len(input[i]) > 0 { - if len(input[i]) > internalBlockSize { - panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) - } - - off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin - if off > math.MaxUint32 { - panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) - } - ptrs[i] = int32(off) - } - } - - sdup := *s // create copy of initial states to receive intermediate updates - - rounds := generateMaskAndRounds16(input, maskRounds) - - for r := 0; r < rounds; r++ { - m := maskRounds[r] - - block16(&sdup.v0[0], uintptr(baseMin), &ptrs[0], m.mask, int(64*m.rounds)) - - for j := 0; j < len(ptrs); j++ { - ptrs[j] += int32(64 * m.rounds) // update pointers for next round - if m.mask&(1< 0 { - if len(input[i]) > internalBlockSize { - panic(fmt.Sprintf("Sanity check fails for lane %d: maximum input length cannot exceed internalBlockSize", i)) - } - - off := uint64(uintptr(unsafe.Pointer(&(input[i][0])))) - baseMin - if off > math.MaxUint32 { - panic(fmt.Sprintf("invalid buffer sent with offset %x", off)) - } - ptrs[i] = int32(off) - } - } 
- - sdup := *s // create copy of initial states to receive intermediate updates - - rounds := generateMaskAndRounds8(input, maskRounds) - - for r := 0; r < rounds; r++ { - m := maskRounds[r] - var cache cache8 // stack storage for block8 tmp state - block8(&sdup.v0[0], uintptr(baseMin), &ptrs[0], &cache[0], int(64*m.rounds)) - - for j := 0; j < len(ptrs); j++ { - ptrs[j] += int32(64 * m.rounds) // update pointers for next round - if m.mask&(1< internalBlockSize { - l = internalBlockSize - } - nnn, err := d.write(p[:l]) - if err != nil { - return nn, err - } - nn += nnn - p = p[l:] - - if len(p) == 0 { - break - } - - } - return -} - -func (d *md5Digest) write(p []byte) (nn int, err error) { - - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == BlockSize { - // Create a copy of the overflow buffer in order to send it async over the channel - // (since we will modify the overflow buffer down below with any access beyond multiples of 64) - tmp := <-d.buffers - tmp = tmp[:BlockSize] - copy(tmp, d.x[:]) - d.sendBlock(blockInput{uid: d.uid, msg: tmp}, len(p)-n < BlockSize) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= BlockSize { - n := len(p) &^ (BlockSize - 1) - buf := <-d.buffers - buf = buf[:n] - copy(buf, p) - d.sendBlock(blockInput{uid: d.uid, msg: buf}, len(p)-n < BlockSize) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d *md5Digest) Close() { - if d.blocksCh != nil { - close(d.blocksCh) - d.blocksCh = nil - } -} - -// Sum - Return MD5 sum in bytes -func (d *md5Digest) Sum(in []byte) (result []byte) { - if d.blocksCh == nil { - panic("sum after close") - } - - trail := <-d.buffers - trail = append(trail[:0], d.x[:d.nx]...) - - length := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte - tmp[0] = 0x80 - if length%64 < 56 { - trail = append(trail, tmp[0:56-length%64]...) - } else { - trail = append(trail, tmp[0:64+56-length%64]...) 
- } - - // Length in bits. - length <<= 3 - binary.LittleEndian.PutUint64(tmp[:], length) // append length in bits - - trail = append(trail, tmp[0:8]...) - if len(trail)%BlockSize != 0 { - panic(fmt.Errorf("internal error: sum block was not aligned. len=%d, nx=%d", len(trail), d.nx)) - } - sumCh := make(chan sumResult, 1) - d.sendBlock(blockInput{uid: d.uid, msg: trail, sumCh: sumCh}, true) - - sum := <-sumCh - - return append(in, sum.digest[:]...) -} - -// sendBlock will send a block for processing. -// If cycle is true we will block on cycle, otherwise we will only block -// if the block channel is full. -func (d *md5Digest) sendBlock(bi blockInput, cycle bool) { - if cycle { - select { - case d.blocksCh <- bi: - d.cycleServer <- d.uid - } - return - } - // Only block on cycle if we filled the buffer - select { - case d.blocksCh <- bi: - return - default: - d.cycleServer <- d.uid - d.blocksCh <- bi - } -} diff --git a/mantle/vendor/github.com/minio/md5-simd/md5-server_amd64.go b/mantle/vendor/github.com/minio/md5-simd/md5-server_amd64.go deleted file mode 100644 index 46105953..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/md5-server_amd64.go +++ /dev/null @@ -1,307 +0,0 @@ -//+build !noasm,!appengine,gc - -// Copyright (c) 2020 MinIO Inc. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -package md5simd - -import ( - "encoding/binary" - "fmt" - "runtime" - - "github.com/klauspost/cpuid" -) - -// MD5 initialization constants -const ( - // Lanes is the number of concurrently calculated hashes. 
- Lanes = 16 - - init0 = 0x67452301 - init1 = 0xefcdab89 - init2 = 0x98badcfe - init3 = 0x10325476 -) - -// md5ServerUID - Does not start at 0 but next multiple of 16 so as to be able to -// differentiate with default initialisation value of 0 -const md5ServerUID = Lanes - -const buffersPerLane = 3 - -// Message to send across input channel -type blockInput struct { - uid uint64 - msg []byte - sumCh chan sumResult - reset bool -} - -type sumResult struct { - digest [Size]byte -} - -type lanesInfo [Lanes]blockInput - -// md5Server - Type to implement parallel handling of MD5 invocations -type md5Server struct { - uidCounter uint64 - cycle chan uint64 // client with uid has update. - newInput chan newClient // Add new client. - digests map[uint64][Size]byte // Map of uids to (interim) digest results - maskRounds16 [16]maskRounds // Pre-allocated static array for max 16 rounds - maskRounds8a [8]maskRounds // Pre-allocated static array for max 8 rounds (1st AVX2 core) - maskRounds8b [8]maskRounds // Pre-allocated static array for max 8 rounds (2nd AVX2 core) - allBufs []byte // Preallocated buffer. - buffers chan []byte // Preallocated buffers, sliced from allBufs. -} - -// NewServer - Create new object for parallel processing handling -func NewServer() Server { - if !cpuid.CPU.AVX2() { - return &fallbackServer{} - } - md5srv := &md5Server{} - md5srv.digests = make(map[uint64][Size]byte) - md5srv.newInput = make(chan newClient, Lanes) - md5srv.cycle = make(chan uint64, Lanes*10) - md5srv.uidCounter = md5ServerUID - 1 - md5srv.allBufs = make([]byte, 32+buffersPerLane*Lanes*internalBlockSize) - md5srv.buffers = make(chan []byte, buffersPerLane*Lanes) - // Fill buffers. 
- for i := 0; i < buffersPerLane*Lanes; i++ { - s := 32 + i*internalBlockSize - md5srv.buffers <- md5srv.allBufs[s : s+internalBlockSize : s+internalBlockSize] - } - - // Start a single thread for reading from the input channel - go md5srv.process(md5srv.newInput) - return md5srv -} - -type newClient struct { - uid uint64 - input chan blockInput -} - -// process - Sole handler for reading from the input channel. -func (s *md5Server) process(newClients chan newClient) { - // To fill up as many lanes as possible: - // - // 1. Wait for a cycle id. - // 2. If not already in a lane, add, otherwise leave on channel - // 3. Start timer - // 4. Check if lanes is full, if so, goto 10 (process). - // 5. If timeout, goto 10. - // 6. Wait for new id (goto 2) or timeout (goto 10). - // 10. Process. - // 11. Check all input if there is already input, if so add to lanes. - // 12. Goto 1 - - // lanes contains the lanes. - var lanes lanesInfo - // lanesFilled contains the number of filled lanes for current cycle. - var lanesFilled int - // clients contains active clients - var clients = make(map[uint64]chan blockInput, Lanes) - - addToLane := func(uid uint64) { - cl, ok := clients[uid] - if !ok { - // Unknown client. Maybe it was already removed. - return - } - // Check if we already have it. - for _, lane := range lanes[:lanesFilled] { - if lane.uid == uid { - return - } - } - // Continue until we get a block or there is nothing on channel - for { - select { - case block, ok := <-cl: - if !ok { - // Client disconnected - delete(clients, block.uid) - return - } - if block.uid != uid { - panic(fmt.Errorf("uid mismatch, %d (block) != %d (client)", block.uid, uid)) - } - // If reset message, reset and we're done - if block.reset { - delete(s.digests, uid) - continue - } - - // If requesting sum, we will need to maintain state. 
- if block.sumCh != nil { - var dig digest - d, ok := s.digests[uid] - if ok { - dig.s[0] = binary.LittleEndian.Uint32(d[0:4]) - dig.s[1] = binary.LittleEndian.Uint32(d[4:8]) - dig.s[2] = binary.LittleEndian.Uint32(d[8:12]) - dig.s[3] = binary.LittleEndian.Uint32(d[12:16]) - } else { - dig.s[0], dig.s[1], dig.s[2], dig.s[3] = init0, init1, init2, init3 - } - - sum := sumResult{} - // Add end block to current digest. - blockGeneric(&dig, block.msg) - - binary.LittleEndian.PutUint32(sum.digest[0:], dig.s[0]) - binary.LittleEndian.PutUint32(sum.digest[4:], dig.s[1]) - binary.LittleEndian.PutUint32(sum.digest[8:], dig.s[2]) - binary.LittleEndian.PutUint32(sum.digest[12:], dig.s[3]) - block.sumCh <- sum - if block.msg != nil { - s.buffers <- block.msg - } - continue - } - if len(block.msg) == 0 { - continue - } - lanes[lanesFilled] = block - lanesFilled++ - return - default: - return - } - } - } - addNewClient := func(cl newClient) { - if _, ok := clients[cl.uid]; ok { - panic("internal error: duplicate client registration") - } - clients[cl.uid] = cl.input - } - - allLanesFilled := func() bool { - return lanesFilled == Lanes || lanesFilled >= len(clients) - } - - for { - // Step 1. - for lanesFilled == 0 { - select { - case cl, ok := <-newClients: - if !ok { - return - } - addNewClient(cl) - // Check if it already sent a payload. - addToLane(cl.uid) - continue - case uid := <-s.cycle: - addToLane(uid) - } - } - - fillLanes: - for !allLanesFilled() { - select { - case cl, ok := <-newClients: - if !ok { - return - } - addNewClient(cl) - - case uid := <-s.cycle: - addToLane(uid) - default: - // Nothing more queued... 
- break fillLanes - } - } - - // If we did not fill all lanes, check if there is more waiting - if !allLanesFilled() { - runtime.Gosched() - for uid := range clients { - addToLane(uid) - if allLanesFilled() { - break - } - } - } - if false { - if !allLanesFilled() { - fmt.Println("Not all lanes filled", lanesFilled, "of", len(clients)) - //pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - } else if true { - fmt.Println("all lanes filled") - } - } - // Process the lanes we could collect - s.blocks(lanes[:lanesFilled]) - - // Clear lanes... - lanesFilled = 0 - // Add all current queued - for uid := range clients { - addToLane(uid) - if allLanesFilled() { - break - } - } - } -} - -func (s *md5Server) Close() { - if s.newInput != nil { - close(s.newInput) - s.newInput = nil - } -} - -// Invoke assembly and send results back -func (s *md5Server) blocks(lanes []blockInput) { - inputs := [16][]byte{} - for i := range lanes { - inputs[i] = lanes[i].msg - } - - // Collect active digests... - state := s.getDigests(lanes) - // Process all lanes... 
- s.blockMd5_x16(&state, inputs, len(lanes) <= 8) - - for i, lane := range lanes { - uid := lane.uid - dig := [Size]byte{} - binary.LittleEndian.PutUint32(dig[0:], state.v0[i]) - binary.LittleEndian.PutUint32(dig[4:], state.v1[i]) - binary.LittleEndian.PutUint32(dig[8:], state.v2[i]) - binary.LittleEndian.PutUint32(dig[12:], state.v3[i]) - - s.digests[uid] = dig - if lane.msg != nil { - s.buffers <- lane.msg - } - lanes[i] = blockInput{} - } -} - -func (s *md5Server) getDigests(lanes []blockInput) (d digest16) { - for i, lane := range lanes { - a, ok := s.digests[lane.uid] - if ok { - d.v0[i] = binary.LittleEndian.Uint32(a[0:4]) - d.v1[i] = binary.LittleEndian.Uint32(a[4:8]) - d.v2[i] = binary.LittleEndian.Uint32(a[8:12]) - d.v3[i] = binary.LittleEndian.Uint32(a[12:16]) - } else { - d.v0[i] = init0 - d.v1[i] = init1 - d.v2[i] = init2 - d.v3[i] = init3 - } - } - return -} diff --git a/mantle/vendor/github.com/minio/md5-simd/md5-server_fallback.go b/mantle/vendor/github.com/minio/md5-simd/md5-server_fallback.go deleted file mode 100644 index 7814dada..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/md5-server_fallback.go +++ /dev/null @@ -1,12 +0,0 @@ -//+build !amd64 appengine !gc noasm - -// Copyright (c) 2020 MinIO Inc. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -package md5simd - -// NewServer - Create new object for parallel processing handling -func NewServer() *fallbackServer { - return &fallbackServer{} -} diff --git a/mantle/vendor/github.com/minio/md5-simd/md5-util_amd64.go b/mantle/vendor/github.com/minio/md5-simd/md5-util_amd64.go deleted file mode 100644 index 32bbae4a..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/md5-util_amd64.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2020 MinIO Inc. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. 
- -package md5simd - -import ( - "sort" -) - -// Helper struct for sorting blocks based on length -type lane struct { - len uint - pos uint -} - -// Helper struct for generating number of rounds in combination with mask for valid lanes -type maskRounds struct { - mask uint64 - rounds uint64 -} - -func generateMaskAndRounds8(input [8][]byte, mr *[8]maskRounds) (rounds int) { - // Sort on blocks length small to large - var sorted [8]lane - for c, inpt := range input { - sorted[c] = lane{uint(len(inpt)), uint(c)} - } - sort.Slice(sorted[:], func(i, j int) bool { return sorted[i].len < sorted[j].len }) - - // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks - m, round := uint64(0xff), uint64(0) - - for _, s := range sorted { - if s.len > 0 { - if uint64(s.len)>>6 > round { - mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} - rounds++ - } - round = uint64(s.len) >> 6 - } - m = m & ^(1 << uint(s.pos)) - } - return -} - -func generateMaskAndRounds16(input [16][]byte, mr *[16]maskRounds) (rounds int) { - - // Sort on blocks length small to large - var sorted [16]lane - for c, inpt := range input { - sorted[c] = lane{uint(len(inpt)), uint(c)} - } - sort.Slice(sorted[:], func(i, j int) bool { return sorted[i].len < sorted[j].len }) - - // Create mask array including 'rounds' (of processing blocks of 64 bytes) between masks - m, round := uint64(0xffff), uint64(0) - - for _, s := range sorted { - if s.len > 0 { - if uint64(s.len)>>6 > round { - mr[rounds] = maskRounds{m, (uint64(s.len) >> 6) - round} - rounds++ - } - round = uint64(s.len) >> 6 - } - m = m & ^(1 << uint(s.pos)) - } - return -} diff --git a/mantle/vendor/github.com/minio/md5-simd/md5.go b/mantle/vendor/github.com/minio/md5-simd/md5.go deleted file mode 100644 index 4f56b79d..00000000 --- a/mantle/vendor/github.com/minio/md5-simd/md5.go +++ /dev/null @@ -1,57 +0,0 @@ -package md5simd - -import ( - "crypto/md5" - "hash" - "sync" -) - -const ( - // The blocksize of MD5 
in bytes. - BlockSize = 64 - - // The size of an MD5 checksum in bytes. - Size = 16 - - // internalBlockSize is the internal block size. - internalBlockSize = 32 << 10 -) - -type Server interface { - NewHash() Hasher - Close() -} - -type Hasher interface { - hash.Hash - Close() -} - -// md5Wrapper is a wrapper around the builtin hasher. -type md5Wrapper struct { - hash.Hash -} - -var md5Pool = sync.Pool{New: func() interface{} { - return md5.New() -}} - -// fallbackServer - Fallback when no assembly is available. -type fallbackServer struct { -} - -// NewHash -- return regular Golang md5 hashing from crypto -func (s *fallbackServer) NewHash() Hasher { - return &md5Wrapper{Hash: md5Pool.New().(hash.Hash)} -} - -func (s *fallbackServer) Close() { -} - -func (m *md5Wrapper) Close() { - if m.Hash != nil { - m.Reset() - md5Pool.Put(m.Hash) - m.Hash = nil - } -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/.gitignore b/mantle/vendor/github.com/minio/minio-go/v7/.gitignore deleted file mode 100644 index 8081bd0f..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*~ -*.test -validator -golangci-lint \ No newline at end of file diff --git a/mantle/vendor/github.com/minio/minio-go/v7/.golangci.yml b/mantle/vendor/github.com/minio/minio-go/v7/.golangci.yml deleted file mode 100644 index 7d1dd335..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/.golangci.yml +++ /dev/null @@ -1,16 +0,0 @@ -linters-settings: - misspell: - locale: US - -linters: - disable-all: true - enable: - - typecheck - - goimports - - misspell - - govet - - golint - - ineffassign - - gosimple - - deadcode - - structcheck diff --git a/mantle/vendor/github.com/minio/minio-go/v7/CNAME b/mantle/vendor/github.com/minio/minio-go/v7/CNAME deleted file mode 100644 index d365a7bb..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/CNAME +++ /dev/null @@ -1 +0,0 @@ -minio-go.min.io \ No newline at end of file diff --git 
a/mantle/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md b/mantle/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md deleted file mode 100644 index 8b1ee86c..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ - -### Developer Guidelines - -``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following: - -* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes. - - Fork it - - Create your feature branch (git checkout -b my-new-feature) - - Commit your changes (git commit -am 'Add some feature') - - Push to the branch (git push origin my-new-feature) - - Create new Pull Request - -* When you're ready to create a pull request, be sure to: - - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. - - Run `go fmt` - - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. - - Make sure `go test -race ./...` and `go build` completes. - NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables - ``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...`` - -* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project - - `minio-go` project is strictly conformant with Golang style - - if you happen to observe offending code, please feel free to send a pull request diff --git a/mantle/vendor/github.com/minio/minio-go/v7/LICENSE b/mantle/vendor/github.com/minio/minio-go/v7/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/mantle/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md b/mantle/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md deleted file mode 100644 index f640dfb9..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/MAINTAINERS.md +++ /dev/null @@ -1,35 +0,0 @@ -# For maintainers only - -## Responsibilities - -Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522) - -### Making new releases -Tag and sign your release commit, additionally this step requires you to have access to MinIO's trusted private key. -```sh -$ export GNUPGHOME=/media/${USER}/minio/trusted -$ git tag -s 4.0.0 -$ git push -$ git push --tags -``` - -### Update version -Once release has been made update `libraryVersion` constant in `api.go` to next to be released version. 
- -```sh -$ grep libraryVersion api.go - libraryVersion = "4.0.1" -``` - -Commit your changes -``` -$ git commit -a -m "Update version for next release" --author "MinIO Trusted " -``` - -### Announce -Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@min.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release. - -To generate `changelog` -```sh -$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' .. -``` diff --git a/mantle/vendor/github.com/minio/minio-go/v7/Makefile b/mantle/vendor/github.com/minio/minio-go/v7/Makefile deleted file mode 100644 index ae640c4d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -GOPATH := $(shell go env GOPATH) -TMPDIR := $(shell mktemp -d) - -all: checks - -.PHONY: examples docs - -checks: lint vet test examples functional-test - -lint: - @mkdir -p ${GOPATH}/bin - @which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0) - @echo "Running $@ check" - @GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean - @GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml - -vet: - @GO111MODULE=on go vet ./... - -test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./... 
- -examples: - @echo "Building s3 examples" - @cd ./examples/s3 && $(foreach v,$(wildcard examples/s3/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) - @echo "Building minio examples" - @cd ./examples/minio && $(foreach v,$(wildcard examples/minio/*.go),go build -mod=mod -o ${TMPDIR}/$(basename $(v)) $(notdir $(v)) || exit 1;) - -functional-test: - @GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go - -clean: - @echo "Cleaning up all the generated files" - @find . -name '*.test' | xargs rm -fv - @find . -name '*~' | xargs rm -fv diff --git a/mantle/vendor/github.com/minio/minio-go/v7/NOTICE b/mantle/vendor/github.com/minio/minio-go/v7/NOTICE deleted file mode 100644 index 1e8fd3b9..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/NOTICE +++ /dev/null @@ -1,9 +0,0 @@ -MinIO Cloud Storage, (C) 2014-2020 MinIO, Inc. - -This product includes software developed at MinIO, Inc. -(https://min.io/). - -The MinIO project contains unmodified/modified subcomponents too with -separate copyright notices and license terms. 
Your use of the source -code for these subcomponents is subject to the terms and conditions -of Apache License Version 2.0 diff --git a/mantle/vendor/github.com/minio/minio-go/v7/README.md b/mantle/vendor/github.com/minio/minio-go/v7/README.md deleted file mode 100644 index b5c26d53..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/README.md +++ /dev/null @@ -1,251 +0,0 @@ -# MinIO Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE) - -The MinIO Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. - -This quickstart guide will show you how to install the MinIO client SDK, connect to MinIO, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference). - -This document assumes that you have a working [Go development environment](https://golang.org/doc/install). - -## Download from Github -```sh -GO111MODULE=on go get github.com/minio/minio-go/v7 -``` - -## Initialize MinIO Client -MinIO client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. - -| Parameter | Description| -| :--- | :--- | -| endpoint | URL to object storage service. | -| _minio.Options_ | All the options such as credentials, custom transport etc. 
| - -```go -package main - -import ( - "log" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) - -func main() { - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize minio client object. - minioClient, err := minio.New(endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), - Secure: useSSL, - }) - if err != nil { - log.Fatalln(err) - } - - log.Printf("%#v\n", minioClient) // minioClient is now set up -} -``` - -## Quick Start Example - File Uploader -This example program connects to an object storage server, creates a bucket and uploads a file to the bucket. - -We will use the MinIO server running at [https://play.min.io](https://play.min.io) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. - -### FileUploader.go -```go -package main - -import ( - "context" - "log" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) - -func main() { - ctx := context.Background() - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // Initialize minio client object. - minioClient, err := minio.New(endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), - Secure: useSSL, - }) - if err != nil { - log.Fatalln(err) - } - - // Make a new bucket called mymusic. 
- bucketName := "mymusic" - location := "us-east-1" - - err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) - if err != nil { - // Check to see if we already own this bucket (which happens if you run this twice) - exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) - if errBucketExists == nil && exists { - log.Printf("We already own %s\n", bucketName) - } else { - log.Fatalln(err) - } - } else { - log.Printf("Successfully created %s\n", bucketName) - } - - // Upload the zip file - objectName := "golden-oldies.zip" - filePath := "/tmp/golden-oldies.zip" - contentType := "application/zip" - - // Upload the zip file with FPutObject - info, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Successfully uploaded %s of size %d\n", objectName, info.Size) -} -``` - -### Run FileUploader -```sh -export GO111MODULE=on -go run file-uploader.go -2016/08/13 17:03:28 Successfully created mymusic -2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 - -mc ls play/mymusic/ -[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip -``` - -## API Reference -The full API Reference is available here. 
- -* [Complete API Reference](https://docs.min.io/docs/golang-client-api-reference) - -### API Reference : Bucket Operations -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) - -### API Reference : Bucket policy Operations -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) - -### API Reference : Bucket notification Operations -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO Extension) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO Extension) - -### API Reference : File Object Operations -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FGetObject) - -### API Reference : Object Operations -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* 
[`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) - - -### API Reference : Presigned Operations -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) - -### API Reference : Client custom settings -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) - -## Full Examples - -### Full Examples : Bucket Operations -* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) -* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) -* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) -* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) -* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) -* 
[listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) -* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) - -### Full Examples : Bucket policy Operations -* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) -* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) -* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - -### Full Examples : Bucket lifecycle Operations -* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) -* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) - -### Full Examples : Bucket encryption Operations -* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) -* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) -* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go) - -### Full Examples : Bucket replication Operations -* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) -* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) -* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) - -### Full Examples : Bucket notification Operations -* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) -* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) -* 
[removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO Extension) -* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO Extension) - -### Full Examples : File Object Operations -* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) -* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) -* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) -* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) - -### Full Examples : Object Operations -* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) -* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) -* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) -* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) -* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) -* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) -* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) -* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) - -### Full Examples : Encrypted Object Operations -* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) -* 
[get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) -* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) - -### Full Examples : Presigned Operations -* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) -* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) -* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) -* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) - -## Explore Further -* [Complete Documentation](https://docs.min.io) -* [MinIO Go Client SDK API Reference](https://docs.min.io/docs/golang-client-api-reference) - -## Contribute -[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) - -## License -This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](https://github.com/minio/minio-go/blob/master/LICENSE) and [NOTICE](https://github.com/minio/minio-go/blob/master/NOTICE) for more information. 
diff --git a/mantle/vendor/github.com/minio/minio-go/v7/README_zh_CN.md b/mantle/vendor/github.com/minio/minio-go/v7/README_zh_CN.md deleted file mode 100644 index 64e79341..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/README_zh_CN.md +++ /dev/null @@ -1,260 +0,0 @@ -# 适用于与Amazon S3兼容云存储的MinIO Go SDK [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) - -MinIO Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对象存储服务。 - -**支持的云存储:** - -- AWS Signature Version 4 - - Amazon S3 - - MinIO - -- AWS Signature Version 2 - - Google Cloud Storage (兼容模式) - - Openstack Swift + Swift3 middleware - - Ceph Object Gateway - - Riak CS - -本文我们将学习如何安装MinIO client SDK,连接到MinIO,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.min.io/docs/golang-client-api-reference)。 - -本文假设你已经有 [Go开发环境](https://golang.org/doc/install)。 - -## 从Github下载 -```sh -go get -u github.com/minio/minio-go -``` - -## 初始化MinIO Client -MinIO client需要以下4个参数来连接与Amazon S3兼容的对象存储。 - -| 参数 | 描述| -| :--- | :--- | -| endpoint | 对象存储服务的URL | -| accessKeyID | Access key是唯一标识你的账户的用户ID。 | -| secretAccessKey | Secret key是你账户的密码。 | -| secure | true代表使用HTTPS | - - -```go -package main - -import ( - "log" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) - -func main() { - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // 初使化 minio client对象。 - minioClient, err := minio.New(endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), - Secure: useSSL, - }) - if err != nil { - log.Fatalln(err) - } - - log.Printf("%#v\n", minioClient) // minioClient初使化成功 -} -``` - -## 示例-文件上传 -本示例连接到一个对象存储服务,创建一个存储桶并上传一个文件到存储桶中。 - -我们在本示例中使用运行在 [https://play.min.io](https://play.min.io) 
上的MinIO服务,你可以用这个服务来开发和测试。示例中的访问凭据是公开的。 - -### FileUploader.go -```go -package main - -import ( - "context" - "log" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) - -func main() { - ctx := context.Background() - endpoint := "play.min.io" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // 初使化 minio client对象。 - minioClient, err := minio.New(endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), - Secure: useSSL, - }) - if err != nil { - log.Fatalln(err) - } - - // 创建一个叫mymusic的存储桶。 - bucketName := "mymusic" - location := "us-east-1" - - err = minioClient.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: location}) - if err != nil { - // 检查存储桶是否已经存在。 - exists, errBucketExists := minioClient.BucketExists(ctx, bucketName) - if errBucketExists == nil && exists { - log.Printf("We already own %s\n", bucketName) - } else { - log.Fatalln(err) - } - } else { - log.Printf("Successfully created %s\n", bucketName) - } - - // 上传一个zip文件。 - objectName := "golden-oldies.zip" - filePath := "/tmp/golden-oldies.zip" - contentType := "application/zip" - - // 使用FPutObject上传一个zip文件。 - n, err := minioClient.FPutObject(ctx, bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType}) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Successfully uploaded %s of size %d\n", objectName, n) -} -``` - -### 运行FileUploader -```sh -go run file-uploader.go -2016/08/13 17:03:28 Successfully created mymusic -2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 - -mc ls play/mymusic/ -[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip -``` - -## API文档 -完整的API文档在这里。 -* [完整API文档](https://docs.min.io/docs/golang-client-api-reference) - -### API文档 : 操作存储桶 -* [`MakeBucket`](https://docs.min.io/docs/golang-client-api-reference#MakeBucket) -* 
[`ListBuckets`](https://docs.min.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.min.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.min.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.min.io/docs/golang-client-api-reference#ListObjects) -* [`ListIncompleteUploads`](https://docs.min.io/docs/golang-client-api-reference#ListIncompleteUploads) - -### API文档 : 存储桶策略 -* [`SetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.min.io/docs/golang-client-api-reference#GetBucketPolicy) - -### API文档 : 存储桶通知 -* [`SetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenBucketNotification) (MinIO 扩展) -* [`ListenNotification`](https://docs.min.io/docs/golang-client-api-reference#ListenNotification) (MinIO 扩展) - -### API文档 : 操作文件对象 -* [`FPutObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.min.io/docs/golang-client-api-reference#FPutObject) - -### API文档 : 操作对象 -* [`GetObject`](https://docs.min.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.min.io/docs/golang-client-api-reference#PutObject) -* [`PutObjectStreaming`](https://docs.min.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.min.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.min.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.min.io/docs/golang-client-api-reference#RemoveObject) -* 
[`RemoveObjects`](https://docs.min.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.min.io/docs/golang-client-api-reference#RemoveIncompleteUpload) -* [`SelectObjectContent`](https://docs.min.io/docs/golang-client-api-reference#SelectObjectContent) - -### API文档 : Presigned操作 -* [`PresignedGetObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.min.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.min.io/docs/golang-client-api-reference#PresignedPostPolicy) - -### API文档 : 客户端自定义设置 -* [`SetAppInfo`](http://docs.min.io/docs/golang-client-api-reference#SetAppInfo) -* [`TraceOn`](http://docs.min.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.min.io/docs/golang-client-api-reference#TraceOff) - -## 完整示例 - -### 完整示例 : 操作存储桶 -* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) -* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) -* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) -* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) -* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) -* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) -* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) - -### 完整示例 : 存储桶策略 -* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) -* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) -* 
[listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - -### 完整示例 : 存储桶生命周期 -* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) -* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) - -### 完整示例 : 存储桶加密 -* [setbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketencryption.go) -* [getbucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketencryption.go) -* [deletebucketencryption.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketencryption.go) - -### 完整示例 : 存储桶复制 -* [setbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketreplication.go) -* [getbucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketreplication.go) -* [removebucketreplication.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucketreplication.go) - -### 完整示例 : 存储桶通知 -* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) -* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) -* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (MinIO扩展) -* [listennotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listen-notification.go) (MinIO 扩展) - -### 完整示例 : 操作文件对象 -* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) -* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) -* 
[fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) -* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) - -### 完整示例 : 操作对象 -* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) -* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) -* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) -* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) -* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) -* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) -* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) -* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) - -### 完整示例 : 操作加密对象 -* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) -* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) -* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) - -### 完整示例 : Presigned操作 -* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) -* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) -* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) -* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) - -## 了解更多 -* [完整文档](https://docs.min.io) -* [MinIO Go Client 
SDK API文档](https://docs.min.io/docs/golang-client-api-reference) - -## 贡献 -[贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go b/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go deleted file mode 100644 index e02ab84a..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-encryption.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/sse" -) - -// SetBucketEncryption sets the default encryption configuration on an existing bucket. -func (c Client) SetBucketEncryption(ctx context.Context, bucketName string, config *sse.Configuration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if config == nil { - return errInvalidArgument("configuration cannot be empty") - } - - buf, err := xml.Marshal(config) - if err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. 
- urlValues := make(url.Values) - urlValues.Set("encryption", "") - - // Content-length is mandatory to set a default encryption configuration - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(buf), - contentLength: int64(len(buf)), - contentMD5Base64: sumMD5Base64(buf), - } - - // Execute PUT to upload a new bucket default encryption configuration. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - return nil -} - -// RemoveBucketEncryption removes the default encryption configuration on a bucket with a context to control cancellations and timeouts. -func (c Client) RemoveBucketEncryption(ctx context.Context, bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("encryption", "") - - // DELETE default encryption configuration on a bucket. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - return nil -} - -// GetBucketEncryption gets the default encryption configuration -// on an existing bucket with a context to control cancellations and timeouts. -func (c Client) GetBucketEncryption(ctx context.Context, bucketName string) (*sse.Configuration, error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("encryption", "") - - // Execute GET on bucket to get the default encryption configuration. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - - encryptionConfig := &sse.Configuration{} - if err = xmlDecoder(resp.Body, encryptionConfig); err != nil { - return nil, err - } - - return encryptionConfig, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go b/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go deleted file mode 100644 index e1fac813..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-lifecycle.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/lifecycle" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// SetBucketLifecycle set the lifecycle on an existing bucket. -func (c Client) SetBucketLifecycle(ctx context.Context, bucketName string, config *lifecycle.Configuration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If lifecycle is empty then delete it. - if config.Empty() { - return c.removeBucketLifecycle(ctx, bucketName) - } - - buf, err := xml.Marshal(config) - if err != nil { - return err - } - - // Save the updated lifecycle. - return c.putBucketLifecycle(ctx, bucketName, buf) -} - -// Saves a new bucket lifecycle. -func (c Client) putBucketLifecycle(ctx context.Context, bucketName string, buf []byte) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Content-length is mandatory for put lifecycle request - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(buf), - contentLength: int64(len(buf)), - contentMD5Base64: sumMD5Base64(buf), - } - - // Execute PUT to upload a new bucket lifecycle. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Remove lifecycle from a bucket. -func (c Client) removeBucketLifecycle(ctx context.Context, bucketName string) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute DELETE on objectName. 
- resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// GetBucketLifecycle fetch bucket lifecycle configuration -func (c Client) GetBucketLifecycle(ctx context.Context, bucketName string) (*lifecycle.Configuration, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - - bucketLifecycle, err := c.getBucketLifecycle(ctx, bucketName) - if err != nil { - return nil, err - } - - config := lifecycle.NewConfiguration() - if err = xml.Unmarshal(bucketLifecycle, config); err != nil { - return nil, err - } - return config, nil -} - -// Request server for current bucket lifecycle. -func (c Client) getBucketLifecycle(ctx context.Context, bucketName string) ([]byte, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute GET on bucket to get lifecycle. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - } - - return ioutil.ReadAll(resp.Body) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go b/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go deleted file mode 100644 index 76787eca..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-notification.go +++ /dev/null @@ -1,255 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2020 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bufio" - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - "time" - - jsoniter "github.com/json-iterator/go" - "github.com/minio/minio-go/v7/pkg/notification" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// SetBucketNotification saves a new bucket notification with a context to control cancellations and timeouts. -func (c Client) SetBucketNotification(ctx context.Context, bucketName string, config notification.Configuration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("notification", "") - - notifBytes, err := xml.Marshal(&config) - if err != nil { - return err - } - - notifBuffer := bytes.NewReader(notifBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: notifBuffer, - contentLength: int64(len(notifBytes)), - contentMD5Base64: sumMD5Base64(notifBytes), - contentSHA256Hex: sum256Hex(notifBytes), - } - - // Execute PUT to upload a new bucket notification. 
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config -func (c Client) RemoveAllBucketNotification(ctx context.Context, bucketName string) error { - return c.SetBucketNotification(ctx, bucketName, notification.Configuration{}) -} - -// GetBucketNotification returns current bucket notification configuration -func (c Client) GetBucketNotification(ctx context.Context, bucketName string) (bucketNotification notification.Configuration, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return notification.Configuration{}, err - } - return c.getBucketNotification(ctx, bucketName) -} - -// Request server for notification rules. -func (c Client) getBucketNotification(ctx context.Context, bucketName string) (notification.Configuration, error) { - urlValues := make(url.Values) - urlValues.Set("notification", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return notification.Configuration{}, err - } - return processBucketNotificationResponse(bucketName, resp) - -} - -// processes the GetNotification http response from the server. 
-func processBucketNotificationResponse(bucketName string, resp *http.Response) (notification.Configuration, error) { - if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - return notification.Configuration{}, errResponse - } - var bucketNotification notification.Configuration - err := xmlDecoder(resp.Body, &bucketNotification) - if err != nil { - return notification.Configuration{}, err - } - return bucketNotification, nil -} - -// ListenNotification listen for all events, this is a MinIO specific API -func (c Client) ListenNotification(ctx context.Context, prefix, suffix string, events []string) <-chan notification.Info { - return c.ListenBucketNotification(ctx, "", prefix, suffix, events) -} - -// ListenBucketNotification listen for bucket events, this is a MinIO specific API -func (c Client) ListenBucketNotification(ctx context.Context, bucketName, prefix, suffix string, events []string) <-chan notification.Info { - notificationInfoCh := make(chan notification.Info, 1) - const notificationCapacity = 4 * 1024 * 1024 - notificationEventBuffer := make([]byte, notificationCapacity) - // Only success, start a routine to start reading line by line. - go func(notificationInfoCh chan<- notification.Info) { - defer close(notificationInfoCh) - - // Validate the bucket name. - if bucketName != "" { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - select { - case notificationInfoCh <- notification.Info{ - Err: err, - }: - case <-ctx.Done(): - } - return - } - } - - // Check ARN partition to verify if listening bucket is supported - if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { - select { - case notificationInfoCh <- notification.Info{ - Err: errAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), - }: - case <-ctx.Done(): - } - return - } - - // Continuously run and listen on bucket notification. 
- // Create a done channel to control 'ListObjects' go routine. - retryDoneCh := make(chan struct{}, 1) - - // Indicate to our routine to exit cleanly upon return. - defer close(retryDoneCh) - - // Prepare urlValues to pass into the request on every loop - urlValues := make(url.Values) - urlValues.Set("prefix", prefix) - urlValues.Set("suffix", suffix) - urlValues["events"] = events - - // Wait on the jitter retry loop. - for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - if err != nil { - select { - case notificationInfoCh <- notification.Info{ - Err: err, - }: - case <-ctx.Done(): - } - return - } - - // Validate http response, upon error return quickly. - if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - select { - case notificationInfoCh <- notification.Info{ - Err: errResponse, - }: - case <-ctx.Done(): - } - return - } - - // Initialize a new bufio scanner, to read line by line. - bio := bufio.NewScanner(resp.Body) - - // Use a higher buffer to support unexpected - // caching done by proxies - bio.Buffer(notificationEventBuffer, notificationCapacity) - var json = jsoniter.ConfigCompatibleWithStandardLibrary - - // Unmarshal each line, returns marshaled values. - for bio.Scan() { - var notificationInfo notification.Info - if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { - // Unexpected error during json unmarshal, send - // the error to caller for actionable as needed. 
- select { - case notificationInfoCh <- notification.Info{ - Err: err, - }: - case <-ctx.Done(): - return - } - closeResponse(resp) - continue - } - // Send notificationInfo - select { - case notificationInfoCh <- notificationInfo: - case <-ctx.Done(): - closeResponse(resp) - return - } - } - - if err = bio.Err(); err != nil { - select { - case notificationInfoCh <- notification.Info{ - Err: err, - }: - case <-ctx.Done(): - return - } - } - - // Close current connection before looping further. - closeResponse(resp) - - } - }(notificationInfoCh) - - // Returns the notification info channel, for caller to start reading from. - return notificationInfoCh -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go b/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go deleted file mode 100644 index 7e01275d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-policy.go +++ /dev/null @@ -1,142 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// SetBucketPolicy sets the access permissions on an existing bucket. -func (c Client) SetBucketPolicy(ctx context.Context, bucketName, policy string) error { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If policy is empty then delete the bucket policy. - if policy == "" { - return c.removeBucketPolicy(ctx, bucketName) - } - - // Save the updated policies. - return c.putBucketPolicy(ctx, bucketName, policy) -} - -// Saves a new bucket policy. -func (c Client) putBucketPolicy(ctx context.Context, bucketName, policy string) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: strings.NewReader(policy), - contentLength: int64(len(policy)), - } - - // Execute PUT to upload a new bucket policy. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Removes all policies on a bucket. -func (c Client) removeBucketPolicy(ctx context.Context, bucketName string) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// GetBucketPolicy returns the current policy -func (c Client) GetBucketPolicy(ctx context.Context, bucketName string) (string, error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - bucketPolicy, err := c.getBucketPolicy(ctx, bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchBucketPolicy" { - return "", nil - } - return "", err - } - return bucketPolicy, nil -} - -// Request server for current bucket policy. -func (c Client) getBucketPolicy(ctx context.Context, bucketName string) (string, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - policy := string(bucketPolicyBuf) - return policy, err -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go b/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go deleted file mode 100644 index 41054e13..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-replication.go +++ /dev/null @@ -1,228 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/json" - "encoding/xml" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/replication" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// RemoveBucketReplication removes a replication config on an existing bucket. -func (c Client) RemoveBucketReplication(ctx context.Context, bucketName string) error { - return c.removeBucketReplication(ctx, bucketName) -} - -// SetBucketReplication sets a replication config on an existing bucket. -func (c Client) SetBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If replication is empty then delete it. - if cfg.Empty() { - return c.removeBucketReplication(ctx, bucketName) - } - // Save the updated replication. - return c.putBucketReplication(ctx, bucketName, cfg) -} - -// Saves a new bucket replication. -func (c Client) putBucketReplication(ctx context.Context, bucketName string, cfg replication.Config) error { - // Get resources properly escaped and lined up before - // using them in http request. 
- urlValues := make(url.Values) - urlValues.Set("replication", "") - replication, err := xml.Marshal(cfg) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(replication), - contentLength: int64(len(replication)), - contentMD5Base64: sumMD5Base64(replication), - } - - // Execute PUT to upload a new bucket replication config. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - - return nil -} - -// Remove replication from a bucket. -func (c Client) removeBucketReplication(ctx context.Context, bucketName string) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("replication", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// GetBucketReplication fetches bucket replication configuration.If config is not -// found, returns empty config with nil error. -func (c Client) GetBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return cfg, err - } - bucketReplicationCfg, err := c.getBucketReplication(ctx, bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "ReplicationConfigurationNotFoundError" { - return cfg, nil - } - return cfg, err - } - return bucketReplicationCfg, nil -} - -// Request server for current bucket replication config. 
-func (c Client) getBucketReplication(ctx context.Context, bucketName string) (cfg replication.Config, err error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("replication", "") - - // Execute GET on bucket to get replication config. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return cfg, err - } - - if resp.StatusCode != http.StatusOK { - return cfg, httpRespToErrorResponse(resp, bucketName, "") - } - - if err = xmlDecoder(resp.Body, &cfg); err != nil { - return cfg, err - } - - return cfg, nil -} - -// GetBucketReplicationMetrics fetches bucket replication status metrics -func (c Client) GetBucketReplicationMetrics(ctx context.Context, bucketName string) (s replication.Metrics, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return s, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("replication-metrics", "") - - // Execute GET on bucket to get replication config. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return s, err - } - - if resp.StatusCode != http.StatusOK { - return s, httpRespToErrorResponse(resp, bucketName, "") - } - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return s, err - } - - if err := json.Unmarshal(respBytes, &s); err != nil { - return s, err - } - return s, nil -} - -// ResetBucketReplication kicks off replication of previously replicated objects if ExistingObjectReplication -// is enabled in the replication config -func (c Client) ResetBucketReplication(ctx context.Context, bucketName string, olderThan time.Duration) (resetID string, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("replication-reset", "") - if olderThan > 0 { - urlValues.Set("older-than", olderThan.String()) - } - - // Execute GET on bucket to get replication config. 
- resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - if err := json.Unmarshal(respBytes, &resetID); err != nil { - return "", err - } - return resetID, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go b/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go deleted file mode 100644 index fcb966e6..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-tagging.go +++ /dev/null @@ -1,135 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "errors" - "io" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/tags" -) - -// GetBucketTagging fetch tagging configuration for a bucket with a -// context to control cancellations and timeouts. -func (c Client) GetBucketTagging(ctx context.Context, bucketName string) (*tags.Tags, error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - // Execute GET on bucket to get tagging configuration. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - - defer io.Copy(ioutil.Discard, resp.Body) - return tags.ParseBucketXML(resp.Body) -} - -// SetBucketTagging sets tagging configuration for a bucket -// with a context to control cancellations and timeouts. -func (c Client) SetBucketTagging(ctx context.Context, bucketName string, tags *tags.Tags) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if tags == nil { - return errors.New("nil tags passed") - } - - buf, err := xml.Marshal(tags) - if err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - // Content-length is mandatory to set a default encryption configuration - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(buf), - contentLength: int64(len(buf)), - contentMD5Base64: sumMD5Base64(buf), - } - - // Execute PUT on bucket to put tagging configuration. 
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - return nil -} - -// RemoveBucketTagging removes tagging configuration for a -// bucket with a context to control cancellations and timeouts. -func (c Client) RemoveBucketTagging(ctx context.Context, bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - // Execute DELETE on bucket to remove tagging configuration. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - return nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go b/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go deleted file mode 100644 index e3ceeb33..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-bucket-versioning.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// SetBucketVersioning sets a bucket versioning configuration -func (c Client) SetBucketVersioning(ctx context.Context, bucketName string, config BucketVersioningConfiguration) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - buf, err := xml.Marshal(config) - if err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("versioning", "") - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(buf), - contentLength: int64(len(buf)), - contentMD5Base64: sumMD5Base64(buf), - contentSHA256Hex: sum256Hex(buf), - } - - // Execute PUT to set a bucket versioning. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// EnableVersioning - enable object versioning in given bucket. -func (c Client) EnableVersioning(ctx context.Context, bucketName string) error { - return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Enabled"}) -} - -// SuspendVersioning - suspend object versioning in given bucket. 
-func (c Client) SuspendVersioning(ctx context.Context, bucketName string) error { - return c.SetBucketVersioning(ctx, bucketName, BucketVersioningConfiguration{Status: "Suspended"}) -} - -// BucketVersioningConfiguration is the versioning configuration structure -type BucketVersioningConfiguration struct { - XMLName xml.Name `xml:"VersioningConfiguration"` - Status string `xml:"Status"` - MFADelete string `xml:"MfaDelete,omitempty"` -} - -// Various supported states -const ( - Enabled = "Enabled" - // Disabled State = "Disabled" only used by MFA Delete not supported yet. - Suspended = "Suspended" -) - -// Enabled returns true if bucket versioning is enabled -func (b BucketVersioningConfiguration) Enabled() bool { - return b.Status == Enabled -} - -// Suspended returns true if bucket versioning is suspended -func (b BucketVersioningConfiguration) Suspended() bool { - return b.Status == Suspended -} - -// GetBucketVersioning gets the versioning configuration on -// an existing bucket with a context to control cancellations and timeouts. -func (c Client) GetBucketVersioning(ctx context.Context, bucketName string) (BucketVersioningConfiguration, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return BucketVersioningConfiguration{}, err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("versioning", "") - - // Execute GET on bucket to get the versioning configuration. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return BucketVersioningConfiguration{}, err - } - - if resp.StatusCode != http.StatusOK { - return BucketVersioningConfiguration{}, httpRespToErrorResponse(resp, bucketName, "") - } - - versioningConfig := BucketVersioningConfiguration{} - if err = xmlDecoder(resp.Body, &versioningConfig); err != nil { - return versioningConfig, err - } - - return versioningConfig, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-compose-object.go b/mantle/vendor/github.com/minio/minio-go/v7/api-compose-object.go deleted file mode 100644 index 87dfae8d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-compose-object.go +++ /dev/null @@ -1,580 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/google/uuid" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// CopyDestOptions represents options specified by user for CopyObject/ComposeObject APIs -type CopyDestOptions struct { - Bucket string // points to destination bucket - Object string // points to destination object - - // `Encryption` is the key info for server-side-encryption with customer - // provided key. If it is nil, no encryption is performed. - Encryption encrypt.ServerSide - - // `userMeta` is the user-metadata key-value pairs to be set on the - // destination. The keys are automatically prefixed with `x-amz-meta-` - // if needed. If nil is passed, and if only a single source (of any - // size) is provided in the ComposeObject call, then metadata from the - // source is copied to the destination. - // if no user-metadata is provided, it is copied from source - // (when there is only once source object in the compose - // request) - UserMetadata map[string]string - // UserMetadata is only set to destination if ReplaceMetadata is true - // other value is UserMetadata is ignored and we preserve src.UserMetadata - // NOTE: if you set this value to true and now metadata is present - // in UserMetadata your destination object will not have any metadata - // set. - ReplaceMetadata bool - - // `userTags` is the user defined object tags to be set on destination. - // This will be set only if the `replaceTags` field is set to true. - // Otherwise this field is ignored - UserTags map[string]string - ReplaceTags bool - - // Specifies whether you want to apply a Legal Hold to the copied object. - LegalHold LegalHoldStatus - - // Object Retention related fields - Mode RetentionMode - RetainUntilDate time.Time - - Size int64 // Needs to be specified if progress bar is specified. 
- // Progress of the entire copy operation will be sent here. - Progress io.Reader -} - -// Process custom-metadata to remove a `x-amz-meta-` prefix if -// present and validate that keys are distinct (after this -// prefix removal). -func filterCustomMeta(userMeta map[string]string) map[string]string { - m := make(map[string]string) - for k, v := range userMeta { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - k = k[len("x-amz-meta-"):] - } - if _, ok := m[k]; ok { - continue - } - m[k] = v - } - return m -} - -// Marshal converts all the CopyDestOptions into their -// equivalent HTTP header representation -func (opts CopyDestOptions) Marshal(header http.Header) { - const replaceDirective = "REPLACE" - if opts.ReplaceTags { - header.Set(amzTaggingHeaderDirective, replaceDirective) - if tags := s3utils.TagEncode(opts.UserTags); tags != "" { - header.Set(amzTaggingHeader, tags) - } - } - - if opts.LegalHold != LegalHoldStatus("") { - header.Set(amzLegalHoldHeader, opts.LegalHold.String()) - } - - if opts.Mode != RetentionMode("") && !opts.RetainUntilDate.IsZero() { - header.Set(amzLockMode, opts.Mode.String()) - header.Set(amzLockRetainUntil, opts.RetainUntilDate.Format(time.RFC3339)) - } - - if opts.Encryption != nil { - opts.Encryption.Marshal(header) - } - - if opts.ReplaceMetadata { - header.Set("x-amz-metadata-directive", replaceDirective) - for k, v := range filterCustomMeta(opts.UserMetadata) { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { - header.Set(k, v) - } else { - header.Set("x-amz-meta-"+k, v) - } - } - } -} - -// toDestinationInfo returns a validated copyOptions object. -func (opts CopyDestOptions) validate() (err error) { - // Input validation. 
- if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { - return err - } - if err = s3utils.CheckValidObjectName(opts.Object); err != nil { - return err - } - if opts.Progress != nil && opts.Size < 0 { - return errInvalidArgument("For progress bar effective size needs to be specified") - } - return nil -} - -// CopySrcOptions represents a source object to be copied, using -// server-side copying APIs. -type CopySrcOptions struct { - Bucket, Object string - VersionID string - MatchETag string - NoMatchETag string - MatchModifiedSince time.Time - MatchUnmodifiedSince time.Time - MatchRange bool - Start, End int64 - Encryption encrypt.ServerSide -} - -// Marshal converts all the CopySrcOptions into their -// equivalent HTTP header representation -func (opts CopySrcOptions) Marshal(header http.Header) { - // Set the source header - header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)) - if opts.VersionID != "" { - header.Set("x-amz-copy-source", s3utils.EncodePath(opts.Bucket+"/"+opts.Object)+"?versionId="+opts.VersionID) - } - - if opts.MatchETag != "" { - header.Set("x-amz-copy-source-if-match", opts.MatchETag) - } - if opts.NoMatchETag != "" { - header.Set("x-amz-copy-source-if-none-match", opts.NoMatchETag) - } - - if !opts.MatchModifiedSince.IsZero() { - header.Set("x-amz-copy-source-if-modified-since", opts.MatchModifiedSince.Format(http.TimeFormat)) - } - if !opts.MatchUnmodifiedSince.IsZero() { - header.Set("x-amz-copy-source-if-unmodified-since", opts.MatchUnmodifiedSince.Format(http.TimeFormat)) - } - - if opts.Encryption != nil { - encrypt.SSECopy(opts.Encryption).Marshal(header) - } -} - -func (opts CopySrcOptions) validate() (err error) { - // Input validation. 
- if err = s3utils.CheckValidBucketName(opts.Bucket); err != nil { - return err - } - if err = s3utils.CheckValidObjectName(opts.Object); err != nil { - return err - } - if opts.Start > opts.End || opts.Start < 0 { - return errInvalidArgument("start must be non-negative, and start must be at most end.") - } - return nil -} - -// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. -func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, - metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { - - // Build headers. - headers := make(http.Header) - - // Set all the metadata headers. - for k, v := range metadata { - headers.Set(k, v) - } - if !dstOpts.Internal.ReplicationStatus.Empty() { - headers.Set(amzBucketReplicationStatus, string(dstOpts.Internal.ReplicationStatus)) - } - if !dstOpts.Internal.SourceMTime.IsZero() { - headers.Set(minIOBucketSourceMTime, dstOpts.Internal.SourceMTime.Format(time.RFC3339Nano)) - } - if dstOpts.Internal.SourceETag != "" { - headers.Set(minIOBucketSourceETag, dstOpts.Internal.SourceETag) - } - if dstOpts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") - } - if len(dstOpts.UserTags) != 0 { - headers.Set(amzTaggingHeader, s3utils.TagEncode(dstOpts.UserTags)) - } - - reqMetadata := requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - } - if dstOpts.Internal.SourceVersionID != "" { - if _, err := uuid.Parse(dstOpts.Internal.SourceVersionID); err != nil { - return ObjectInfo{}, errInvalidArgument(err.Error()) - } - urlValues := make(url.Values) - urlValues.Set("versionId", dstOpts.Internal.SourceVersionID) - reqMetadata.queryValues = urlValues - } - - // Set the source header - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - if srcOpts.VersionID != "" { - headers.Set("x-amz-copy-source", 
s3utils.EncodePath(srcBucket+"/"+srcObject)+"?versionId="+srcOpts.VersionID) - } - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) - } - - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return ObjectInfo{}, err - } - - objInfo := ObjectInfo{ - Key: destObject, - ETag: strings.Trim(cpObjRes.ETag, "\""), - LastModified: cpObjRes.LastModified, - } - return objInfo, nil -} - -func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { - - headers := make(http.Header) - - // Set source - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - - if startOffset < 0 { - return p, errInvalidArgument("startOffset must be non-negative") - } - - if length >= 0 { - headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) - } - - for k, v := range metadata { - headers.Set(k, v) - } - - queryValues := make(url.Values) - queryValues.Set("partNumber", strconv.Itoa(partID)) - queryValues.Set("uploadId", uploadID) - - resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - queryValues: queryValues, - }) - defer closeResponse(resp) - if err != nil { - return - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, destBucket, destObject) - } - - // Decode copy-part response on success. 
- cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partID, cpObjRes.ETag - return p, nil -} - -// uploadPartCopy - helper function to create a part in a multipart -// upload via an upload-part-copy request -// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, - headers http.Header) (p CompletePart, err error) { - - // Build query parameters - urlValues := make(url.Values) - urlValues.Set("partNumber", strconv.Itoa(partNumber)) - urlValues.Set("uploadId", uploadID) - - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ - bucketName: bucket, - objectName: object, - customHeader: headers, - queryValues: urlValues, - }) - defer closeResponse(resp) - if err != nil { - return p, err - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, bucket, object) - } - - // Decode copy-part response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partNumber, cpObjRes.ETag - return p, nil -} - -// ComposeObject - creates an object using server-side copying -// of existing objects. It takes a list of source objects (with optional offsets) -// and concatenates them into a new object using only server-side copying -// operations. Optionally takes progress reader hook for applications to -// look at current progress. 
-func (c Client) ComposeObject(ctx context.Context, dst CopyDestOptions, srcs ...CopySrcOptions) (UploadInfo, error) { - if len(srcs) < 1 || len(srcs) > maxPartsCount { - return UploadInfo{}, errInvalidArgument("There must be as least one and up to 10000 source objects.") - } - - for _, src := range srcs { - if err := src.validate(); err != nil { - return UploadInfo{}, err - } - } - - if err := dst.validate(); err != nil { - return UploadInfo{}, err - } - - srcObjectInfos := make([]ObjectInfo, len(srcs)) - srcObjectSizes := make([]int64, len(srcs)) - var totalSize, totalParts int64 - var err error - for i, src := range srcs { - opts := StatObjectOptions{ServerSideEncryption: encrypt.SSE(src.Encryption), VersionID: src.VersionID} - srcObjectInfos[i], err = c.statObject(context.Background(), src.Bucket, src.Object, opts) - if err != nil { - return UploadInfo{}, err - } - - srcCopySize := srcObjectInfos[i].Size - // Check if a segment is specified, and if so, is the - // segment within object bounds? - if src.MatchRange { - // Since range is specified, - // 0 <= src.start <= src.end - // so only invalid case to check is: - if src.End >= srcCopySize || src.Start < 0 { - return UploadInfo{}, errInvalidArgument( - fmt.Sprintf("CopySrcOptions %d has invalid segment-to-copy [%d, %d] (size is %d)", - i, src.Start, src.End, srcCopySize)) - } - srcCopySize = src.End - src.Start + 1 - } - - // Only the last source may be less than `absMinPartSize` - if srcCopySize < absMinPartSize && i < len(srcs)-1 { - return UploadInfo{}, errInvalidArgument( - fmt.Sprintf("CopySrcOptions %d is too small (%d) and it is not the last part", i, srcCopySize)) - } - - // Is data to copy too large? 
- totalSize += srcCopySize - if totalSize > maxMultipartPutObjectSize { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) - } - - // record source size - srcObjectSizes[i] = srcCopySize - - // calculate parts needed for current source - totalParts += partsRequired(srcCopySize) - // Do we need more parts than we are allowed? - if totalParts > maxPartsCount { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf( - "Your proposed compose object requires more than %d parts", maxPartsCount)) - } - } - - // Single source object case (i.e. when only one source is - // involved, it is being copied wholly and at most 5GiB in - // size, emptyfiles are also supported). - if (totalParts == 1 && srcs[0].Start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { - return c.CopyObject(ctx, dst, srcs[0]) - } - - // Now, handle multipart-copy cases. - - // 1. Ensure that the object has not been changed while - // we are copying data. - for i, src := range srcs { - src.MatchETag = srcObjectInfos[i].ETag - } - - // 2. Initiate a new multipart upload. - - // Set user-metadata on the destination object. If no - // user-metadata is specified, and there is only one source, - // (only) then metadata from source is copied. - var userMeta map[string]string - if dst.ReplaceMetadata { - userMeta = dst.UserMetadata - } else { - userMeta = srcObjectInfos[0].UserMetadata - } - - var userTags map[string]string - if dst.ReplaceTags { - userTags = dst.UserTags - } else { - userTags = srcObjectInfos[0].UserTags - } - - uploadID, err := c.newUploadID(ctx, dst.Bucket, dst.Object, PutObjectOptions{ - ServerSideEncryption: dst.Encryption, - UserMetadata: userMeta, - UserTags: userTags, - Mode: dst.Mode, - RetainUntilDate: dst.RetainUntilDate, - LegalHold: dst.LegalHold, - }) - if err != nil { - return UploadInfo{}, err - } - - // 3. 
Perform copy part uploads - objParts := []CompletePart{} - partIndex := 1 - for i, src := range srcs { - var h = make(http.Header) - src.Marshal(h) - if dst.Encryption != nil && dst.Encryption.Type() == encrypt.SSEC { - dst.Encryption.Marshal(h) - } - - // calculate start/end indices of parts after - // splitting. - startIdx, endIdx := calculateEvenSplits(srcObjectSizes[i], src) - for j, start := range startIdx { - end := endIdx[j] - - // Add (or reset) source range header for - // upload part copy request. - h.Set("x-amz-copy-source-range", - fmt.Sprintf("bytes=%d-%d", start, end)) - - // make upload-part-copy request - complPart, err := c.uploadPartCopy(ctx, dst.Bucket, - dst.Object, uploadID, partIndex, h) - if err != nil { - return UploadInfo{}, err - } - if dst.Progress != nil { - io.CopyN(ioutil.Discard, dst.Progress, end-start+1) - } - objParts = append(objParts, complPart) - partIndex++ - } - } - - // 4. Make final complete-multipart request. - uploadInfo, err := c.completeMultipartUpload(ctx, dst.Bucket, dst.Object, uploadID, - completeMultipartUpload{Parts: objParts}, PutObjectOptions{}) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalSize - return uploadInfo, nil -} - -// partsRequired is maximum parts possible with -// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)) -func partsRequired(size int64) int64 { - maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) - r := size / int64(maxPartSize) - if size%int64(maxPartSize) > 0 { - r++ - } - return r -} - -// calculateEvenSplits - computes splits for a source and returns -// start and end index slices. Splits happen evenly to be sure that no -// part is less than 5MiB, as that could fail the multipart request if -// it is not the last part. 
-func calculateEvenSplits(size int64, src CopySrcOptions) (startIndex, endIndex []int64) { - if size == 0 { - return - } - - reqParts := partsRequired(size) - startIndex = make([]int64, reqParts) - endIndex = make([]int64, reqParts) - // Compute number of required parts `k`, as: - // - // k = ceiling(size / copyPartSize) - // - // Now, distribute the `size` bytes in the source into - // k parts as evenly as possible: - // - // r parts sized (q+1) bytes, and - // (k - r) parts sized q bytes, where - // - // size = q * k + r (by simple division of size by k, - // so that 0 <= r < k) - // - start := src.Start - if start == -1 { - start = 0 - } - quot, rem := size/reqParts, size%reqParts - nextStart := start - for j := int64(0); j < reqParts; j++ { - curPartSize := quot - if j < rem { - curPartSize++ - } - - cStart := nextStart - cEnd := cStart + curPartSize - 1 - nextStart = cEnd + 1 - - startIndex[j], endIndex[j] = cStart, cEnd - } - return -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-copy-object.go b/mantle/vendor/github.com/minio/minio-go/v7/api-copy-object.go deleted file mode 100644 index 9af036ec..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-copy-object.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "context" - "io" - "io/ioutil" - "net/http" -) - -// CopyObject - copy a source object into a new object -func (c Client) CopyObject(ctx context.Context, dst CopyDestOptions, src CopySrcOptions) (UploadInfo, error) { - if err := src.validate(); err != nil { - return UploadInfo{}, err - } - - if err := dst.validate(); err != nil { - return UploadInfo{}, err - } - - header := make(http.Header) - dst.Marshal(header) - src.Marshal(header) - - resp, err := c.executeMethod(ctx, http.MethodPut, requestMetadata{ - bucketName: dst.Bucket, - objectName: dst.Object, - customHeader: header, - }) - if err != nil { - return UploadInfo{}, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return UploadInfo{}, httpRespToErrorResponse(resp, dst.Bucket, dst.Object) - } - - // Update the progress properly after successful copy. - if dst.Progress != nil { - io.Copy(ioutil.Discard, io.LimitReader(dst.Progress, dst.Size)) - } - - cpObjRes := copyObjectResult{} - if err = xmlDecoder(resp.Body, &cpObjRes); err != nil { - return UploadInfo{}, err - } - - // extract lifecycle expiry date and rule ID - expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) - - return UploadInfo{ - Bucket: dst.Bucket, - Key: dst.Object, - LastModified: cpObjRes.LastModified, - ETag: trimEtag(resp.Header.Get("ETag")), - VersionID: resp.Header.Get(amzVersionID), - Expiration: expTime, - ExpirationRuleID: ruleID, - }, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/mantle/vendor/github.com/minio/minio-go/v7/api-datatypes.go deleted file mode 100644 index 970e1fa5..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-datatypes.go +++ /dev/null @@ -1,173 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "io" - "net/http" - "time" -) - -// BucketInfo container for bucket metadata. -type BucketInfo struct { - // The name of the bucket. - Name string `json:"name"` - // Date the bucket was created. - CreationDate time.Time `json:"creationDate"` -} - -// StringMap represents map with custom UnmarshalXML -type StringMap map[string]string - -// UnmarshalXML unmarshals the XML into a map of string to strings, -// creating a key in the map for each tag and setting it's value to the -// tags contents. -// -// The fact this function is on the pointer of Map is important, so that -// if m is nil it can be initialized, which is often the case if m is -// nested in another xml structural. This is also why the first thing done -// on the first line is initialize it. -func (m *StringMap) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - *m = StringMap{} - type xmlMapEntry struct { - XMLName xml.Name - Value string `xml:",chardata"` - } - for { - var e xmlMapEntry - err := d.Decode(&e) - if err == io.EOF { - break - } else if err != nil { - return err - } - (*m)[e.XMLName.Local] = e.Value - } - return nil -} - -// Owner name. -type Owner struct { - DisplayName string `json:"name"` - ID string `json:"id"` -} - -// UploadInfo contains information about the -// newly uploaded or copied object. 
-type UploadInfo struct { - Bucket string - Key string - ETag string - Size int64 - LastModified time.Time - Location string - VersionID string - - // Lifecycle expiry-date and ruleID associated with the expiry - // not to be confused with `Expires` HTTP header. - Expiration time.Time - ExpirationRuleID string -} - -// ObjectInfo container for object metadata. -type ObjectInfo struct { - // An ETag is optionally set to md5sum of an object. In case of multipart objects, - // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of - // each parts concatenated into one string. - ETag string `json:"etag"` - - Key string `json:"name"` // Name of the object - LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. - Size int64 `json:"size"` // Size in bytes of the object. - ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. - Expires time.Time `json:"expires"` // The date and time at which the object is no longer able to be cached. - - // Collection of additional metadata on the object. - // eg: x-amz-meta-*, content-encoding etc. - Metadata http.Header `json:"metadata" xml:"-"` - - // x-amz-meta-* headers stripped "x-amz-meta-" prefix containing the first value. - UserMetadata StringMap `json:"userMetadata"` - - // x-amz-tagging values in their k/v values. - UserTags map[string]string `json:"userTags"` - - // x-amz-tagging-count value - UserTagCount int - - // Owner name. - Owner Owner - - // ACL grant. - Grant []struct { - Grantee struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` - } `xml:"Grantee"` - Permission string `xml:"Permission"` - } `xml:"Grant"` - - // The class of storage used to store the object. 
- StorageClass string `json:"storageClass"` - - // Versioning related information - IsLatest bool - IsDeleteMarker bool - VersionID string `xml:"VersionId"` - - // x-amz-replication-status value is either in one of the following states - // - COMPLETE - // - PENDING - // - FAILED - // - REPLICA (on the destination) - ReplicationStatus string `xml:"ReplicationStatus"` - - // Lifecycle expiry-date and ruleID associated with the expiry - // not to be confused with `Expires` HTTP header. - Expiration time.Time - ExpirationRuleID string - - // Error - Err error `json:"-"` -} - -// ObjectMultipartInfo container for multipart object metadata. -type ObjectMultipartInfo struct { - // Date and time at which the multipart upload was initiated. - Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Initiator initiator - Owner owner - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass string - - // Key of the object for which the multipart upload was initiated. - Key string - - // Size in bytes of the object. - Size int64 - - // Upload ID that identifies the multipart upload. - UploadID string `xml:"UploadId"` - - // Error - Err error -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-error-response.go b/mantle/vendor/github.com/minio/minio-go/v7/api-error-response.go deleted file mode 100644 index c45c4fdc..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-error-response.go +++ /dev/null @@ -1,271 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "fmt" - "net/http" -) - -/* **** SAMPLE ERROR RESPONSE **** - - - AccessDenied - Access Denied - bucketName - objectName - F19772218238A85A - GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD - -*/ - -// ErrorResponse - Is the typed error returned by all API operations. -// ErrorResponse struct should be comparable since it is compared inside -// golang http API (https://github.com/golang/go/issues/29768) -type ErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - BucketName string - Key string - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` - - // Region where the bucket is located. This header is returned - // only in HEAD bucket and ListObjects response. - Region string - - // Captures the server string returned in response header. - Server string - - // Underlying HTTP status code for the returned error - StatusCode int `xml:"-" json:"-"` -} - -// ToErrorResponse - Returns parsed ErrorResponse struct from body and -// http headers. -// -// For example: -// -// import s3 "github.com/minio/minio-go/v7" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... -func ToErrorResponse(err error) ErrorResponse { - switch err := err.(type) { - case ErrorResponse: - return err - default: - return ErrorResponse{} - } -} - -// Error - Returns S3 error string. 
-func (e ErrorResponse) Error() string { - if e.Message == "" { - msg, ok := s3ErrorResponseMap[e.Code] - if !ok { - msg = fmt.Sprintf("Error response code %s.", e.Code) - } - return msg - } - return e.Message -} - -// Common string for errors to report issue location in unexpected -// cases. -const ( - reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." -) - -// httpRespToErrorResponse returns a new encoded ErrorResponse -// structure as error. -func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { - if resp == nil { - msg := "Empty http response. " + reportIssue - return errInvalidArgument(msg) - } - - errResp := ErrorResponse{ - StatusCode: resp.StatusCode, - Server: resp.Header.Get("Server"), - } - - err := xmlDecoder(resp.Body, &errResp) - // Xml decoding failed with no body, fall back to HTTP headers. - if err != nil { - switch resp.StatusCode { - case http.StatusNotFound: - if objectName == "" { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", - BucketName: bucketName, - } - } else { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchKey", - Message: "The specified key does not exist.", - BucketName: bucketName, - Key: objectName, - } - } - case http.StatusForbidden: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "AccessDenied", - Message: "Access Denied.", - BucketName: bucketName, - Key: objectName, - } - case http.StatusConflict: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "Conflict", - Message: "Bucket not empty.", - BucketName: bucketName, - } - case http.StatusPreconditionFailed: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "PreconditionFailed", - Message: s3ErrorResponseMap["PreconditionFailed"], - BucketName: bucketName, - Key: objectName, - } - default: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - 
Code: resp.Status, - Message: resp.Status, - BucketName: bucketName, - } - } - } - - // Save hostID, requestID and region information - // from headers if not available through error XML. - if errResp.RequestID == "" { - errResp.RequestID = resp.Header.Get("x-amz-request-id") - } - if errResp.HostID == "" { - errResp.HostID = resp.Header.Get("x-amz-id-2") - } - if errResp.Region == "" { - errResp.Region = resp.Header.Get("x-amz-bucket-region") - } - if errResp.Code == "InvalidRegion" && errResp.Region != "" { - errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) - } - - return errResp -} - -// errTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. -func errTransferAccelerationBucket(bucketName string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", - BucketName: bucketName, - } -} - -// errEntityTooLarge - Input size is larger than supported maximum. -func errEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooLarge", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// errEntityTooSmall - Input size is smaller than supported minimum. 
-func errEntityTooSmall(totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooSmall", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// errUnexpectedEOF - Unexpected end of file reached. -func errUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "UnexpectedEOF", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// errInvalidBucketName - Invalid bucket name response. -func errInvalidBucketName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", - Message: message, - RequestID: "minio", - } -} - -// errInvalidObjectName - Invalid object name response. -func errInvalidObjectName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchKey", - Message: message, - RequestID: "minio", - } -} - -// errInvalidArgument - Invalid argument response. 
-func errInvalidArgument(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: message, - RequestID: "minio", - } -} - -// errAPINotSupported - API not supported response -// The specified API call is not supported -func errAPINotSupported(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotImplemented, - Code: "APINotSupported", - Message: message, - RequestID: "minio", - } -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go b/mantle/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go deleted file mode 100644 index afa53079..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-get-object-acl.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "context" - "net/http" - "net/url" -) - -type accessControlPolicy struct { - Owner struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - } `xml:"Owner"` - AccessControlList struct { - Grant []struct { - Grantee struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` - } `xml:"Grantee"` - Permission string `xml:"Permission"` - } `xml:"Grant"` - } `xml:"AccessControlList"` -} - -// GetObjectACL get object ACLs -func (c Client) GetObjectACL(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: url.Values{ - "acl": []string{""}, - }, - }) - if err != nil { - return nil, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - - res := &accessControlPolicy{} - - if err := xmlDecoder(resp.Body, res); err != nil { - return nil, err - } - - objInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{}) - if err != nil { - return nil, err - } - - objInfo.Owner.DisplayName = res.Owner.DisplayName - objInfo.Owner.ID = res.Owner.ID - - objInfo.Grant = append(objInfo.Grant, res.AccessControlList.Grant...) 
- - cannedACL := getCannedACL(res) - if cannedACL != "" { - objInfo.Metadata.Add("X-Amz-Acl", cannedACL) - return &objInfo, nil - } - - grantACL := getAmzGrantACL(res) - for k, v := range grantACL { - objInfo.Metadata[k] = v - } - - return &objInfo, nil -} - -func getCannedACL(aCPolicy *accessControlPolicy) string { - grants := aCPolicy.AccessControlList.Grant - - switch { - case len(grants) == 1: - if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { - return "private" - } - case len(grants) == 2: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { - return "authenticated-read" - } - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { - return "public-read" - } - if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { - return "bucket-owner-read" - } - } - case len(grants) == 3: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { - return "public-read-write" - } - } - } - return "" -} - -func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { - grants := aCPolicy.AccessControlList.Grant - res := map[string][]string{} - - for _, g := range grants { - switch { - case g.Permission == "READ": - res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) - case g.Permission == "WRITE": - res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) - case g.Permission == "READ_ACP": - res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) - case g.Permission == "WRITE_ACP": - res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) - case g.Permission == "FULL_CONTROL": - res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) - } - } - return res -} diff --git 
a/mantle/vendor/github.com/minio/minio-go/v7/api-get-object-file.go b/mantle/vendor/github.com/minio/minio-go/v7/api-get-object-file.go deleted file mode 100644 index bccff457..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-get-object-file.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "os" - "path/filepath" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// FGetObject - download contents of an object to a local file. -// The options can be used to specify the GET request further. -func (c Client) FGetObject(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Verify if destination already exists. - st, err := os.Stat(filePath) - if err == nil { - // If the destination exists and is a directory. - if st.IsDir() { - return errInvalidArgument("fileName is a directory.") - } - } - - // Proceed if file does not exist. return for all other errors. - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - // Extract top level directory. 
- objectDir, _ := filepath.Split(filePath) - if objectDir != "" { - // Create any missing top level directories. - if err := os.MkdirAll(objectDir, 0700); err != nil { - return err - } - } - - // Gather md5sum. - objectStat, err := c.StatObject(ctx, bucketName, objectName, StatObjectOptions(opts)) - if err != nil { - return err - } - - // Write to a temporary file "fileName.part.minio" before saving. - filePartPath := filePath + objectStat.ETag + ".part.minio" - - // If exists, open in append mode. If not create it as a part file. - filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - return err - } - - // If we return early with an error, be sure to close and delete - // filePart. If we have an error along the way there is a chance - // that filePart is somehow damaged, and we should discard it. - closeAndRemove := true - defer func() { - if closeAndRemove { - _ = filePart.Close() - _ = os.Remove(filePartPath) - } - }() - - // Issue Stat to get the current offset. - st, err = filePart.Stat() - if err != nil { - return err - } - - // Initialize get object request headers to set the - // appropriate range offsets to read from. - if st.Size() > 0 { - opts.SetRange(st.Size(), 0) - } - - // Seek to current position for incoming reader. - objectReader, objectStat, _, err := c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - return err - } - - // Write to the part file. - if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { - return err - } - - // Close the file before rename, this is specifically needed for Windows users. - closeAndRemove = false - if err = filePart.Close(); err != nil { - return err - } - - // Safely completed. Now commit by renaming to actual filename. - if err = os.Rename(filePartPath, filePath); err != nil { - return err - } - - // Return. 
- return nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-get-object.go b/mantle/vendor/github.com/minio/minio-go/v7/api-get-object.go deleted file mode 100644 index ef9dd45d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-get-object.go +++ /dev/null @@ -1,681 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "sync" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// GetObject wrapper function that accepts a request context -func (c Client) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - // Detect if snowball is server location we are talking to. - var snowball bool - if location, ok := c.bucketLocCache.Get(bucketName); ok { - if location == "snowball" { - snowball = true - } - } - - var ( - err error - httpReader io.ReadCloser - objectInfo ObjectInfo - totalRead int - ) - - // Create request channel. - reqCh := make(chan getRequest) - // Create response channel. - resCh := make(chan getResponse) - // Create done channel. 
- doneCh := make(chan struct{}) - - // This routine feeds partial object data as and when the caller reads. - go func() { - defer close(reqCh) - defer close(resCh) - - // Used to verify if etag of object has changed since last read. - var etag string - - // Loop through the incoming control messages and read data. - for { - select { - // When the done channel is closed exit our routine. - case <-doneCh: - // Close the http response body before returning. - // This ends the connection with the server. - if httpReader != nil { - httpReader.Close() - } - return - - // Gather incoming request. - case req := <-reqCh: - // If this is the first request we may not need to do a getObject request yet. - if req.isFirstReq { - // First request is a Read/ReadAt. - if req.isReadOp { - // Differentiate between wanting the whole object and just a range. - if req.isReadAt { - // If this is a ReadAt request only get the specified range. - // Range is set with respect to the offset and length of the buffer requested. - // Do not set objectInfo from the first readAt request because it will not get - // the whole object. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{Error: err} - return - } - etag = objectInfo.ETag - // Read at least firstReq.Buffer bytes, if not we have - // reached our EOF. 
- size, err := readFull(httpReader, req.Buffer) - totalRead += size - if size > 0 && err == io.ErrUnexpectedEOF { - if int64(size) < objectInfo.Size { - // In situations when returned size - // is less than the expected content - // length set by the server, make sure - // we return io.ErrUnexpectedEOF - err = io.ErrUnexpectedEOF - } else { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { - // Special cases when server writes more data - // than the content-length, net/http response - // body returns an error, instead of converting - // it to io.EOF - return unexpected EOF. - err = io.ErrUnexpectedEOF - } - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - Size: size, - Error: err, - didRead: true, - } - } else { - // First request is a Stat or Seek call. - // Only need to run a StatObject until an actual Read or ReadAt request comes through. - - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the go-routine. - return - } - etag = objectInfo.ETag - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } - } else if req.settingObjectInfo { // Request is just to get objectInfo. - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - // Check whether this is snowball - // if yes do not use If-Match feature - // it doesn't work. - if etag != "" && !snowball { - opts.SetMatchETag(etag) - } - objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions(opts)) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the goroutine. 
- return - } - // Send back the objectInfo. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } else { - // Offset changes fetch the new object at an Offset. - // Because the httpReader may not be set by the first - // request if it was a stat or seek it must be checked - // if the object has been read or not to only initialize - // new ones when they haven't been already. - // All readAt requests are new requests. - if req.DidOffsetChange || !req.beenRead { - // Check whether this is snowball - // if yes do not use If-Match feature - // it doesn't work. - if etag != "" && !snowball { - opts.SetMatchETag(etag) - } - if httpReader != nil { - // Close previously opened http reader. - httpReader.Close() - } - // If this request is a readAt only get the specified range. - if req.isReadAt { - // Range is set with respect to the offset and length of the buffer requested. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { // Range is set with respect to the offset. - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, _, err = c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{ - Error: err, - } - return - } - totalRead = 0 - } - - // Read at least req.Buffer bytes, if not we have - // reached our EOF. 
- size, err := readFull(httpReader, req.Buffer) - totalRead += size - if size > 0 && err == io.ErrUnexpectedEOF { - if int64(totalRead) < objectInfo.Size { - // In situations when returned size - // is less than the expected content - // length set by the server, make sure - // we return io.ErrUnexpectedEOF - err = io.ErrUnexpectedEOF - } else { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - } else if size == 0 && err == io.EOF && objectInfo.Size > 0 { - // Special cases when server writes more data - // than the content-length, net/http response - // body returns an error, instead of converting - // it to io.EOF - return unexpected EOF. - err = io.ErrUnexpectedEOF - } - - // Reply back how much was read. - resCh <- getResponse{ - Size: size, - Error: err, - didRead: true, - objectInfo: objectInfo, - } - } - } - } - }() - - // Create a newObject through the information sent back by reqCh. - return newObject(reqCh, resCh, doneCh), nil -} - -// get request message container to communicate with internal -// go-routine. -type getRequest struct { - Buffer []byte - Offset int64 // readAt offset. - DidOffsetChange bool // Tracks the offset changes for Seek requests. - beenRead bool // Determines if this is the first time an object is being read. - isReadAt bool // Determines if this request is a request to a specific range - isReadOp bool // Determines if this request is a Read or Read/At request. - isFirstReq bool // Determines if this request is the first time an object is being accessed. - settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. -} - -// get response message container to reply back for the request. -type getResponse struct { - Size int - Error error - didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. - objectInfo ObjectInfo // Used for the first request. -} - -// Object represents an open object. 
It implements -// Reader, ReaderAt, Seeker, Closer for a HTTP stream. -type Object struct { - // Mutex. - mutex *sync.Mutex - - // User allocated and defined. - reqCh chan<- getRequest - resCh <-chan getResponse - doneCh chan<- struct{} - currOffset int64 - objectInfo ObjectInfo - - // Ask lower level to initiate data fetching based on currOffset - seekData bool - - // Keeps track of closed call. - isClosed bool - - // Keeps track of if this is the first call. - isStarted bool - - // Previous error saved for future calls. - prevErr error - - // Keeps track of if this object has been read yet. - beenRead bool - - // Keeps track of if objectInfo has been set yet. - objectInfoSet bool -} - -// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. -// Returns back the size of the buffer read, if anything was read, as well -// as any error encountered. For all first requests sent on the object -// it is also responsible for sending back the objectInfo. -func (o *Object) doGetRequest(request getRequest) (getResponse, error) { - o.reqCh <- request - response := <-o.resCh - - // Return any error to the top level. - if response.Error != nil { - return response, response.Error - } - - // This was the first request. - if !o.isStarted { - // The object has been operated on. - o.isStarted = true - } - // Set the objectInfo if the request was not readAt - // and it hasn't been set before. - if !o.objectInfoSet && !request.isReadAt { - o.objectInfo = response.objectInfo - o.objectInfoSet = true - } - // Set beenRead only if it has not been set before. - if !o.beenRead { - o.beenRead = response.didRead - } - // Data are ready on the wire, no need to reinitiate connection in lower level - o.seekData = false - - return response, nil -} - -// setOffset - handles the setting of offsets for -// Read/ReadAt/Seek requests. -func (o *Object) setOffset(bytesRead int64) error { - // Update the currentOffset. 
- o.currOffset += bytesRead - - if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { - return io.EOF - } - return nil -} - -// Read reads up to len(b) bytes into b. It returns the number of -// bytes read (0 <= n <= len(b)) and any error encountered. Returns -// io.EOF upon end of file. -func (o *Object) Read(b []byte) (n int, err error) { - if o == nil { - return 0, errInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is previous error saved from previous operation. - if o.prevErr != nil || o.isClosed { - return 0, o.prevErr - } - - // Create a new request. - readReq := getRequest{ - isReadOp: true, - beenRead: o.beenRead, - Buffer: b, - } - - // Alert that this is the first request. - if !o.isStarted { - readReq.isFirstReq = true - } - - // Ask to establish a new data fetch routine based on seekData flag - readReq.DidOffsetChange = o.seekData - readReq.Offset = o.currOffset - - // Send and receive from the first request. - response, err := o.doGetRequest(readReq) - if err != nil && err != io.EOF { - // Save the error for future calls. - o.prevErr = err - return response.Size, err - } - - // Bytes read. - bytesRead := int64(response.Size) - - // Set the new offset. - oerr := o.setOffset(bytesRead) - if oerr != nil { - // Save the error for future calls. - o.prevErr = oerr - return response.Size, oerr - } - - // Return the response. - return response.Size, err -} - -// Stat returns the ObjectInfo structure describing Object. -func (o *Object) Stat() (ObjectInfo, error) { - if o == nil { - return ObjectInfo{}, errInvalidArgument("Object is nil") - } - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { - return ObjectInfo{}, o.prevErr - } - - // This is the first request. - if !o.isStarted || !o.objectInfoSet { - // Send the request and get the response. 
- _, err := o.doGetRequest(getRequest{ - isFirstReq: !o.isStarted, - settingObjectInfo: !o.objectInfoSet, - }) - if err != nil { - o.prevErr = err - return ObjectInfo{}, err - } - } - - return o.objectInfo, nil -} - -// ReadAt reads len(b) bytes from the File starting at byte offset -// off. It returns the number of bytes read and the error, if any. -// ReadAt always returns a non-nil error when n < len(b). At end of -// file, that error is io.EOF. -func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { - if o == nil { - return 0, errInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is error which was saved in previous operation. - if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { - return 0, o.prevErr - } - - // Set the current offset to ReadAt offset, because the current offset will be shifted at the end of this method. - o.currOffset = offset - - // Can only compare offsets to size when size has been set. - if o.objectInfoSet { - // If offset is negative than we return io.EOF. - // If offset is greater than or equal to object size we return io.EOF. - if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { - return 0, io.EOF - } - } - - // Create the new readAt request. - readAtReq := getRequest{ - isReadOp: true, - isReadAt: true, - DidOffsetChange: true, // Offset always changes. - beenRead: o.beenRead, // Set if this is the first request to try and read. - Offset: offset, // Set the offset. - Buffer: b, - } - - // Alert that this is the first request. - if !o.isStarted { - readAtReq.isFirstReq = true - } - - // Send and receive from the first request. - response, err := o.doGetRequest(readAtReq) - if err != nil && err != io.EOF { - // Save the error. - o.prevErr = err - return response.Size, err - } - // Bytes read. - bytesRead := int64(response.Size) - // There is no valid objectInfo yet - // to compare against for EOF. 
- if !o.objectInfoSet { - // Update the currentOffset. - o.currOffset += bytesRead - } else { - // If this was not the first request update - // the offsets and compare against objectInfo - // for EOF. - oerr := o.setOffset(bytesRead) - if oerr != nil { - o.prevErr = oerr - return response.Size, oerr - } - } - return response.Size, err -} - -// Seek sets the offset for the next Read or Write to offset, -// interpreted according to whence: 0 means relative to the -// origin of the file, 1 means relative to the current offset, -// and 2 means relative to the end. -// Seek returns the new offset and an error, if any. -// -// Seeking to a negative offset is an error. Seeking to any positive -// offset is legal, subsequent io operations succeed until the -// underlying object is not closed. -func (o *Object) Seek(offset int64, whence int) (n int64, err error) { - if o == nil { - return 0, errInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // At EOF seeking is legal allow only io.EOF, for any other errors we return. - if o.prevErr != nil && o.prevErr != io.EOF { - return 0, o.prevErr - } - - // Negative offset is valid for whence of '2'. - if offset < 0 && whence != 2 { - return 0, errInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) - } - - // This is the first request. So before anything else - // get the ObjectInfo. - if !o.isStarted || !o.objectInfoSet { - // Create the new Seek request. - seekReq := getRequest{ - isReadOp: false, - Offset: offset, - isFirstReq: true, - } - // Send and receive from the seek request. - _, err := o.doGetRequest(seekReq) - if err != nil { - // Save the error. - o.prevErr = err - return 0, err - } - } - - // Switch through whence. 
- switch whence { - default: - return 0, errInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) - case 0: - if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset = offset - case 1: - if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset += offset - case 2: - // If we don't know the object size return an error for io.SeekEnd - if o.objectInfo.Size < 0 { - return 0, errInvalidArgument("Whence END is not supported when the object size is unknown") - } - // Seeking to positive offset is valid for whence '2', but - // since we are backing a Reader we have reached 'EOF' if - // offset is positive. - if offset > 0 { - return 0, io.EOF - } - // Seeking to negative position not allowed for whence. - if o.objectInfo.Size+offset < 0 { - return 0, errInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) - } - o.currOffset = o.objectInfo.Size + offset - } - // Reset the saved error since we successfully seeked, let the Read - // and ReadAt decide. - if o.prevErr == io.EOF { - o.prevErr = nil - } - - // Ask lower level to fetch again from source - o.seekData = true - - // Return the effective offset. - return o.currOffset, nil -} - -// Close - The behavior of Close after the first call returns error -// for subsequent Close() calls. -func (o *Object) Close() (err error) { - if o == nil { - return errInvalidArgument("Object is nil") - } - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // if already closed return an error. - if o.isClosed { - return o.prevErr - } - - // Close successfully. - close(o.doneCh) - - // Save for future operations. - errMsg := "Object is already closed. Bad file descriptor." - o.prevErr = errors.New(errMsg) - // Save here that we closed done channel successfully. 
- o.isClosed = true - return nil -} - -// newObject instantiates a new *minio.Object* -// ObjectInfo will be set by setObjectInfo -func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { - return &Object{ - mutex: &sync.Mutex{}, - reqCh: reqCh, - resCh: resCh, - doneCh: doneCh, - } -} - -// getObject - retrieve object from Object Storage. -// -// Additionally this function also takes range arguments to download the specified -// range bytes of an object. Setting offset and length = 0 will download the full object. -// -// For more information about the HTTP Range header. -// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { - // Validate input arguments. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, ObjectInfo{}, nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, ObjectInfo{}, nil, err - } - - urlValues := make(url.Values) - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - // Execute GET on objectName. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: opts.Header(), - contentSHA256Hex: emptySHA256Hex, - }) - if err != nil { - return nil, ObjectInfo{}, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return nil, ObjectInfo{}, nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - objectStat, err := ToObjectInfo(bucketName, objectName, resp.Header) - if err != nil { - closeResponse(resp) - return nil, ObjectInfo{}, nil, err - } - - // do not close body here, caller will close - return resp.Body, objectStat, resp.Header, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-get-options.go b/mantle/vendor/github.com/minio/minio-go/v7/api-get-options.go deleted file mode 100644 index 9e0cb214..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-get-options.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "fmt" - "net/http" - "time" - - "github.com/minio/minio-go/v7/pkg/encrypt" -) - -//AdvancedGetOptions for internal use by MinIO server - not intended for client use. 
-type AdvancedGetOptions struct { - ReplicationDeleteMarker bool - ReplicationProxyRequest string -} - -// GetObjectOptions are used to specify additional headers or options -// during GET requests. -type GetObjectOptions struct { - headers map[string]string - ServerSideEncryption encrypt.ServerSide - VersionID string - // To be not used by external applications - Internal AdvancedGetOptions -} - -// StatObjectOptions are used to specify additional headers or options -// during GET info/stat requests. -type StatObjectOptions = GetObjectOptions - -// Header returns the http.Header representation of the GET options. -func (o GetObjectOptions) Header() http.Header { - headers := make(http.Header, len(o.headers)) - for k, v := range o.headers { - headers.Set(k, v) - } - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - // this header is set for active-active replication scenario where GET/HEAD - // to site A is proxy'd to site B if object/version missing on site A. - if o.Internal.ReplicationProxyRequest != "" { - headers.Set(minIOBucketReplicationProxyRequest, o.Internal.ReplicationProxyRequest) - } - return headers -} - -// Set adds a key value pair to the options. The -// key-value pair will be part of the HTTP GET request -// headers. -func (o *GetObjectOptions) Set(key, value string) { - if o.headers == nil { - o.headers = make(map[string]string) - } - o.headers[http.CanonicalHeaderKey(key)] = value -} - -// SetMatchETag - set match etag. -func (o *GetObjectOptions) SetMatchETag(etag string) error { - if etag == "" { - return errInvalidArgument("ETag cannot be empty.") - } - o.Set("If-Match", "\""+etag+"\"") - return nil -} - -// SetMatchETagExcept - set match etag except. 
-func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { - if etag == "" { - return errInvalidArgument("ETag cannot be empty.") - } - o.Set("If-None-Match", "\""+etag+"\"") - return nil -} - -// SetUnmodified - set unmodified time since. -func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { - if modTime.IsZero() { - return errInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetModified - set modified time since. -func (o *GetObjectOptions) SetModified(modTime time.Time) error { - if modTime.IsZero() { - return errInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetRange - set the start and end offset of the object to be read. -// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. -func (o *GetObjectOptions) SetRange(start, end int64) error { - switch { - case start == 0 && end < 0: - // Read last '-end' bytes. `bytes=-N`. - o.Set("Range", fmt.Sprintf("bytes=%d", end)) - case 0 < start && end == 0: - // Read everything starting from offset - // 'start'. `bytes=N-`. - o.Set("Range", fmt.Sprintf("bytes=%d-", start)) - case 0 <= start && start <= end: - // Read everything starting at 'start' till the - // 'end'. `bytes=N-M` - o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) - default: - // All other cases such as - // bytes=-3- - // bytes=5-3 - // bytes=-2-4 - // bytes=-3-0 - // bytes=-3--2 - // are invalid. 
- return errInvalidArgument( - fmt.Sprintf( - "Invalid range specified: start=%d end=%d", - start, end)) - } - return nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-list.go b/mantle/vendor/github.com/minio/minio-go/v7/api-list.go deleted file mode 100644 index 814ec29c..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-list.go +++ /dev/null @@ -1,965 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "fmt" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// ListBuckets list all buckets owned by this authenticated user. -// -// This call requires explicit authentication, no anonymous requests are -// allowed for listing buckets. -// -// api := client.New(....) -// for message := range api.ListBuckets(context.Background()) { -// fmt.Println(message) -// } -// -func (c Client) ListBuckets(ctx context.Context) ([]BucketInfo, error) { - // Execute GET on service. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{contentSHA256Hex: emptySHA256Hex}) - defer closeResponse(resp) - if err != nil { - return nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, "", "") - } - } - listAllMyBucketsResult := listAllMyBucketsResult{} - err = xmlDecoder(resp.Body, &listAllMyBucketsResult) - if err != nil { - return nil, err - } - return listAllMyBucketsResult.Buckets.Bucket, nil -} - -/// Bucket Read Operations. - -func (c Client) listObjectsV2(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - // Allocate new list objects channel. - objectStatCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if opts.Recursive { - // If recursive we do not delimit. - delimiter = "" - } - - // Return object owner information by default - fetchOwner := true - - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Initiate list objects goroutine here. - go func(objectStatCh chan<- ObjectInfo) { - defer close(objectStatCh) - // Save continuationToken for next request. - var continuationToken string - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsV2Query(ctx, bucketName, opts.Prefix, continuationToken, - fetchOwner, opts.WithMetadata, delimiter, opts.MaxKeys, opts.headers) - if err != nil { - objectStatCh <- ObjectInfo{ - Err: err, - } - return - } - - // If contents are available loop through and send over channel. 
- for _, object := range result.Contents { - object.ETag = trimEtag(object.ETag) - select { - // Send object content. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case objectStatCh <- ObjectInfo{Key: obj.Prefix}: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // If continuation token present, save it for next request. - if result.NextContinuationToken != "" { - continuationToken = result.NextContinuationToken - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectStatCh) - return objectStatCh -} - -// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?continuation-token - Used to continue iterating over a set of objects -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. -// ?metadata - Specifies if we want metadata for the objects as part of list operation. -func (c Client) listObjectsV2Query(ctx context.Context, bucketName, objectPrefix, continuationToken string, fetchOwner, metadata bool, delimiter string, maxkeys int, headers http.Header) (ListBucketV2Result, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListBucketV2Result{}, err - } - // Validate object prefix. 
- if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return ListBucketV2Result{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Always set list-type in ListObjects V2 - urlValues.Set("list-type", "2") - - if metadata { - urlValues.Set("metadata", "true") - } - - // Always set encoding-type in ListObjects V2 - urlValues.Set("encoding-type", "url") - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", objectPrefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set continuation token - if continuationToken != "" { - urlValues.Set("continuation-token", continuationToken) - } - - // Fetch owner when listing - if fetchOwner { - urlValues.Set("fetch-owner", "true") - } - - // Set max keys. - if maxkeys > 0 { - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - } - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ListBucketV2Result{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Decode listBuckets XML. - listBucketResult := ListBucketV2Result{} - if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { - return listBucketResult, err - } - - // This is an additional verification check to make - // sure proper responses are received. 
- if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { - return listBucketResult, ErrorResponse{ - Code: "NotImplemented", - Message: "Truncated response should have continuation token set", - } - } - - for i, obj := range listBucketResult.Contents { - listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - for i, obj := range listBucketResult.CommonPrefixes { - listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - // Success. - return listBucketResult, nil -} - -func (c Client) listObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - // Allocate new list objects channel. - objectStatCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if opts.Recursive { - // If recursive we do not delimit. - delimiter = "" - } - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Initiate list objects goroutine here. - go func(objectStatCh chan<- ObjectInfo) { - defer close(objectStatCh) - - marker := "" - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsQuery(ctx, bucketName, opts.Prefix, marker, delimiter, opts.MaxKeys, opts.headers) - if err != nil { - objectStatCh <- ObjectInfo{ - Err: err, - } - return - } - - // If contents are available loop through and send over channel. - for _, object := range result.Contents { - // Save the marker. 
- marker = object.Key - select { - // Send object content. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case objectStatCh <- ObjectInfo{Key: obj.Prefix}: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // If next marker present, save it for next request. - if result.NextMarker != "" { - marker = result.NextMarker - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectStatCh) - return objectStatCh -} - -func (c Client) listObjectVersions(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - // Allocate new list objects channel. - resultCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if opts.Recursive { - // If recursive we do not delimit. - delimiter = "" - } - - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(resultCh) - resultCh <- ObjectInfo{ - Err: err, - } - return resultCh - } - - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(opts.Prefix); err != nil { - defer close(resultCh) - resultCh <- ObjectInfo{ - Err: err, - } - return resultCh - } - - // Initiate list objects goroutine here. - go func(resultCh chan<- ObjectInfo) { - defer close(resultCh) - - var ( - keyMarker = "" - versionIDMarker = "" - ) - - for { - // Get list of objects a maximum of 1000 per request. 
- result, err := c.listObjectVersionsQuery(ctx, bucketName, opts.Prefix, keyMarker, versionIDMarker, delimiter, opts.MaxKeys, opts.headers) - if err != nil { - resultCh <- ObjectInfo{ - Err: err, - } - return - } - - // If contents are available loop through and send over channel. - for _, version := range result.Versions { - info := ObjectInfo{ - ETag: trimEtag(version.ETag), - Key: version.Key, - LastModified: version.LastModified, - Size: version.Size, - Owner: version.Owner, - StorageClass: version.StorageClass, - IsLatest: version.IsLatest, - VersionID: version.VersionID, - - IsDeleteMarker: version.isDeleteMarker, - } - select { - // Send object version info. - case resultCh <- info: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case resultCh <- ObjectInfo{Key: obj.Prefix}: - // If receives done from the caller, return here. - case <-ctx.Done(): - return - } - } - - // If next key marker is present, save it for next request. - if result.NextKeyMarker != "" { - keyMarker = result.NextKeyMarker - } - - // If next version id marker is present, save it for next request. - if result.NextVersionIDMarker != "" { - versionIDMarker = result.NextVersionIDMarker - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(resultCh) - return resultCh -} - -// listObjectVersions - (List Object Versions) - List some or all (up to 1000) of the existing objects -// and their versions in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?key-marker - Specifies the key to start with when listing objects in a bucket. 
-// ?version-id-marker - Specifies the version id marker to start with when listing objects with versions in a bucket. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectVersionsQuery(ctx context.Context, bucketName, prefix, keyMarker, versionIDMarker, delimiter string, maxkeys int, headers http.Header) (ListVersionsResult, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListVersionsResult{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(prefix); err != nil { - return ListVersionsResult{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Set versions to trigger versioning API - urlValues.Set("versions", "") - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", prefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set object marker. - if keyMarker != "" { - urlValues.Set("key-marker", keyMarker) - } - - // Set max keys. - if maxkeys > 0 { - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - } - - // Set version ID marker - if versionIDMarker != "" { - urlValues.Set("version-id-marker", versionIDMarker) - } - - // Always set encoding-type - urlValues.Set("encoding-type", "url") - - // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ListVersionsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListVersionsResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Decode ListVersionsResult XML. - listObjectVersionsOutput := ListVersionsResult{} - err = xmlDecoder(resp.Body, &listObjectVersionsOutput) - if err != nil { - return ListVersionsResult{}, err - } - - for i, obj := range listObjectVersionsOutput.Versions { - listObjectVersionsOutput.Versions[i].Key, err = decodeS3Name(obj.Key, listObjectVersionsOutput.EncodingType) - if err != nil { - return listObjectVersionsOutput, err - } - } - - for i, obj := range listObjectVersionsOutput.CommonPrefixes { - listObjectVersionsOutput.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listObjectVersionsOutput.EncodingType) - if err != nil { - return listObjectVersionsOutput, err - } - } - - if listObjectVersionsOutput.NextKeyMarker != "" { - listObjectVersionsOutput.NextKeyMarker, err = decodeS3Name(listObjectVersionsOutput.NextKeyMarker, listObjectVersionsOutput.EncodingType) - if err != nil { - return listObjectVersionsOutput, err - } - } - - return listObjectVersionsOutput, nil -} - -// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?marker - Specifies the key to start with when listing objects in a bucket. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. 
-func (c Client) listObjectsQuery(ctx context.Context, bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int, headers http.Header) (ListBucketResult, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListBucketResult{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return ListBucketResult{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", objectPrefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set object marker. - if objectMarker != "" { - urlValues.Set("marker", objectMarker) - } - - // Set max keys. - if maxkeys > 0 { - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - } - - // Always set encoding-type - urlValues.Set("encoding-type", "url") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ListBucketResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - // Decode listBuckets XML. 
- listBucketResult := ListBucketResult{} - err = xmlDecoder(resp.Body, &listBucketResult) - if err != nil { - return listBucketResult, err - } - - for i, obj := range listBucketResult.Contents { - listBucketResult.Contents[i].Key, err = decodeS3Name(obj.Key, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - for i, obj := range listBucketResult.CommonPrefixes { - listBucketResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - if listBucketResult.NextMarker != "" { - listBucketResult.NextMarker, err = decodeS3Name(listBucketResult.NextMarker, listBucketResult.EncodingType) - if err != nil { - return listBucketResult, err - } - } - - return listBucketResult, nil -} - -// ListObjectsOptions holds all options of a list object request -type ListObjectsOptions struct { - // Include objects versions in the listing - WithVersions bool - // Include objects metadata in the listing - WithMetadata bool - // Only list objects with the prefix - Prefix string - // Ignore '/' delimiter - Recursive bool - // The maximum number of objects requested per - // batch, advanced use-case not useful for most - // applications - MaxKeys int - - // Use the deprecated list objects V1 API - UseV1 bool - - headers http.Header -} - -// Set adds a key value pair to the options. The -// key-value pair will be part of the HTTP GET request -// headers. -func (o *ListObjectsOptions) Set(key, value string) { - if o.headers == nil { - o.headers = make(http.Header) - } - o.headers.Set(key, value) -} - -// ListObjects returns objects list after evaluating the passed options. -// -// api := client.New(....) 
-// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) { -// fmt.Println(object) -// } -// -func (c Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo { - if opts.WithVersions { - return c.listObjectVersions(ctx, bucketName, opts) - } - - // Use legacy list objects v1 API - if opts.UseV1 { - return c.listObjects(ctx, bucketName, opts) - } - - // Check whether this is snowball region, if yes ListObjectsV2 doesn't work, fallback to listObjectsV1. - if location, ok := c.bucketLocCache.Get(bucketName); ok { - if location == "snowball" { - return c.listObjects(ctx, bucketName, opts) - } - } - - return c.listObjectsV2(ctx, bucketName, opts) -} - -// ListIncompleteUploads - List incompletely uploaded multipart objects. -// -// ListIncompleteUploads lists all incompleted objects matching the -// objectPrefix from the specified bucket. If recursion is enabled -// it would list all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive. -// If you enable recursive as 'true' this function will return back all -// the multipart objects in a given bucket name. -// -// api := client.New(....) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListIncompleteUploads(context.Background(), "mytestbucket", "starthere", recursive) { -// fmt.Println(message) -// } -func (c Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { - return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive) -} - -// listIncompleteUploads lists all incomplete uploads. -func (c Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo { - // Allocate channel for multipart uploads. 
- objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) - // Delimiter is set to "/" by default. - delimiter := "/" - if recursive { - // If recursive do not delimit. - delimiter = "" - } - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return objectMultipartStatCh - } - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return objectMultipartStatCh - } - go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { - defer close(objectMultipartStatCh) - // object and upload ID marker for future requests. - var objectMarker string - var uploadIDMarker string - for { - // list all multipart uploads. - result, err := c.listMultipartUploadsQuery(ctx, bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 0) - if err != nil { - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return - } - objectMarker = result.NextKeyMarker - uploadIDMarker = result.NextUploadIDMarker - - // Send all multipart uploads. - for _, obj := range result.Uploads { - // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. - select { - // Send individual uploads here. - case objectMultipartStatCh <- obj: - // If the context is canceled - case <-ctx.Done(): - return - } - } - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send delimited prefixes here. - case objectMultipartStatCh <- ObjectMultipartInfo{Key: obj.Prefix, Size: 0}: - // If context is canceled. - case <-ctx.Done(): - return - } - } - // Listing ends if result not truncated, return right here. 
- if !result.IsTruncated { - return - } - } - }(objectMultipartStatCh) - // return. - return objectMultipartStatCh - -} - -// listMultipartUploadsQuery - (List Multipart Uploads). -// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. -// request parameters. :- -// --------- -// ?key-marker - Specifies the multipart upload after which listing should begin. -// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (c Client) listMultipartUploadsQuery(ctx context.Context, bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set uploads. - urlValues.Set("uploads", "") - // Set object key marker. - if keyMarker != "" { - urlValues.Set("key-marker", keyMarker) - } - // Set upload id marker. - if uploadIDMarker != "" { - urlValues.Set("upload-id-marker", uploadIDMarker) - } - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", prefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Always set encoding-type - urlValues.Set("encoding-type", "url") - - // maxUploads should be 1000 or less. - if maxUploads > 0 { - // Set max-uploads. - urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) - } - - // Execute GET on bucketName to list multipart uploads. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListMultipartUploadsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - // Decode response body. - listMultipartUploadsResult := ListMultipartUploadsResult{} - err = xmlDecoder(resp.Body, &listMultipartUploadsResult) - if err != nil { - return listMultipartUploadsResult, err - } - - listMultipartUploadsResult.NextKeyMarker, err = decodeS3Name(listMultipartUploadsResult.NextKeyMarker, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - - listMultipartUploadsResult.NextUploadIDMarker, err = decodeS3Name(listMultipartUploadsResult.NextUploadIDMarker, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - - for i, obj := range listMultipartUploadsResult.Uploads { - listMultipartUploadsResult.Uploads[i].Key, err = decodeS3Name(obj.Key, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - } - - for i, obj := range listMultipartUploadsResult.CommonPrefixes { - listMultipartUploadsResult.CommonPrefixes[i].Prefix, err = decodeS3Name(obj.Prefix, listMultipartUploadsResult.EncodingType) - if err != nil { - return listMultipartUploadsResult, err - } - } - - return listMultipartUploadsResult, nil -} - -// listObjectParts list all object parts recursively. -func (c Client) listObjectParts(ctx context.Context, bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { - // Part number marker for the next batch of request. - var nextPartNumberMarker int - partsInfo = make(map[int]ObjectPart) - for { - // Get list of uploaded parts a maximum of 1000 per request. 
- listObjPartsResult, err := c.listObjectPartsQuery(ctx, bucketName, objectName, uploadID, nextPartNumberMarker, 1000) - if err != nil { - return nil, err - } - // Append to parts info. - for _, part := range listObjPartsResult.ObjectParts { - // Trim off the odd double quotes from ETag in the beginning and end. - part.ETag = trimEtag(part.ETag) - partsInfo[part.PartNumber] = part - } - // Keep part number marker, for the next iteration. - nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker - // Listing ends result is not truncated, return right here. - if !listObjPartsResult.IsTruncated { - break - } - } - - // Return all the parts. - return partsInfo, nil -} - -// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. -func (c Client) findUploadIDs(ctx context.Context, bucketName, objectName string) ([]string, error) { - var uploadIDs []string - // Make list incomplete uploads recursive. - isRecursive := true - // List all incomplete uploads. - for mpUpload := range c.listIncompleteUploads(ctx, bucketName, objectName, isRecursive) { - if mpUpload.Err != nil { - return nil, mpUpload.Err - } - if objectName == mpUpload.Key { - uploadIDs = append(uploadIDs, mpUpload.UploadID) - } - } - // Return the latest upload id. - return uploadIDs, nil -} - -// listObjectPartsQuery (List Parts query) -// - lists some or all (up to 1000) parts that have been uploaded -// for a specific multipart upload -// -// You can use the request parameters as selection criteria to return -// a subset of the uploads in a bucket, request parameters :- -// --------- -// ?part-number-marker - Specifies the part after which listing should -// begin. -// ?max-parts - Maximum parts to be listed per request. 
-func (c Client) listObjectPartsQuery(ctx context.Context, bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number marker. - urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) - // Set upload id. - urlValues.Set("uploadId", uploadID) - - // maxParts should be 1000 or less. - if maxParts > 0 { - // Set max parts. - urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) - } - - // Execute GET on objectName to get list of parts. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListObjectPartsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode list object parts XML. - listObjectPartsResult := ListObjectPartsResult{} - err = xmlDecoder(resp.Body, &listObjectPartsResult) - if err != nil { - return listObjectPartsResult, err - } - return listObjectPartsResult, nil -} - -// Decode an S3 object name according to the encoding type -func decodeS3Name(name, encodingType string) (string, error) { - switch encodingType { - case "url": - return url.QueryUnescape(name) - default: - return name, nil - } -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go b/mantle/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go deleted file mode 100644 index b139c168..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-object-legal-hold.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// objectLegalHold - object legal hold specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/archive-RESTObjectPUTLegalHold.html -type objectLegalHold struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"LegalHold"` - Status LegalHoldStatus `xml:"Status,omitempty"` -} - -// PutObjectLegalHoldOptions represents options specified by user for PutObjectLegalHold call -type PutObjectLegalHoldOptions struct { - VersionID string - Status *LegalHoldStatus -} - -// GetObjectLegalHoldOptions represents options specified by user for GetObjectLegalHold call -type GetObjectLegalHoldOptions struct { - VersionID string -} - -// LegalHoldStatus - object legal hold status. -type LegalHoldStatus string - -const ( - // LegalHoldEnabled indicates legal hold is enabled - LegalHoldEnabled LegalHoldStatus = "ON" - - // LegalHoldDisabled indicates legal hold is disabled - LegalHoldDisabled LegalHoldStatus = "OFF" -) - -func (r LegalHoldStatus) String() string { - return string(r) -} - -// IsValid - check whether this legal hold status is valid or not. 
-func (r LegalHoldStatus) IsValid() bool { - return r == LegalHoldEnabled || r == LegalHoldDisabled -} - -func newObjectLegalHold(status *LegalHoldStatus) (*objectLegalHold, error) { - if status == nil { - return nil, fmt.Errorf("Status not set") - } - if !status.IsValid() { - return nil, fmt.Errorf("invalid legal hold status `%v`", status) - } - legalHold := &objectLegalHold{ - Status: *status, - } - return legalHold, nil -} - -// PutObjectLegalHold : sets object legal hold for a given object and versionID. -func (c Client) PutObjectLegalHold(ctx context.Context, bucketName, objectName string, opts PutObjectLegalHoldOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("legal-hold", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - lh, err := newObjectLegalHold(opts.Status) - if err != nil { - return err - } - - lhData, err := xml.Marshal(lh) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: bytes.NewReader(lhData), - contentLength: int64(len(lhData)), - contentMD5Base64: sumMD5Base64(lhData), - contentSHA256Hex: sum256Hex(lhData), - } - - // Execute PUT Object Legal Hold. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return nil -} - -// GetObjectLegalHold gets legal-hold status of given object. 
-func (c Client) GetObjectLegalHold(ctx context.Context, bucketName, objectName string, opts GetObjectLegalHoldOptions) (status *LegalHoldStatus, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - urlValues := make(url.Values) - urlValues.Set("legal-hold", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - lh := &objectLegalHold{} - if err = xml.NewDecoder(resp.Body).Decode(lh); err != nil { - return nil, err - } - - return &lh.Status, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-object-lock.go b/mantle/vendor/github.com/minio/minio-go/v7/api-object-lock.go deleted file mode 100644 index 29f52b05..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-object-lock.go +++ /dev/null @@ -1,241 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// RetentionMode - object retention mode. -type RetentionMode string - -const ( - // Governance - governance mode. - Governance RetentionMode = "GOVERNANCE" - - // Compliance - compliance mode. - Compliance RetentionMode = "COMPLIANCE" -) - -func (r RetentionMode) String() string { - return string(r) -} - -// IsValid - check whether this retention mode is valid or not. -func (r RetentionMode) IsValid() bool { - return r == Governance || r == Compliance -} - -// ValidityUnit - retention validity unit. -type ValidityUnit string - -const ( - // Days - denotes no. of days. - Days ValidityUnit = "DAYS" - - // Years - denotes no. of years. - Years ValidityUnit = "YEARS" -) - -func (unit ValidityUnit) String() string { - return string(unit) -} - -// IsValid - check whether this validity unit is valid or not. -func (unit ValidityUnit) isValid() bool { - return unit == Days || unit == Years -} - -// Retention - bucket level retention configuration. -type Retention struct { - Mode RetentionMode - Validity time.Duration -} - -func (r Retention) String() string { - return fmt.Sprintf("{Mode:%v, Validity:%v}", r.Mode, r.Validity) -} - -// IsEmpty - returns whether retention is empty or not. 
-func (r Retention) IsEmpty() bool { - return r.Mode == "" || r.Validity == 0 -} - -// objectLockConfig - object lock configuration specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html -type objectLockConfig struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"ObjectLockConfiguration"` - ObjectLockEnabled string `xml:"ObjectLockEnabled"` - Rule *struct { - DefaultRetention struct { - Mode RetentionMode `xml:"Mode"` - Days *uint `xml:"Days"` - Years *uint `xml:"Years"` - } `xml:"DefaultRetention"` - } `xml:"Rule,omitempty"` -} - -func newObjectLockConfig(mode *RetentionMode, validity *uint, unit *ValidityUnit) (*objectLockConfig, error) { - config := &objectLockConfig{ - ObjectLockEnabled: "Enabled", - } - - if mode != nil && validity != nil && unit != nil { - if !mode.IsValid() { - return nil, fmt.Errorf("invalid retention mode `%v`", mode) - } - - if !unit.isValid() { - return nil, fmt.Errorf("invalid validity unit `%v`", unit) - } - - config.Rule = &struct { - DefaultRetention struct { - Mode RetentionMode `xml:"Mode"` - Days *uint `xml:"Days"` - Years *uint `xml:"Years"` - } `xml:"DefaultRetention"` - }{} - - config.Rule.DefaultRetention.Mode = *mode - if *unit == Days { - config.Rule.DefaultRetention.Days = validity - } else { - config.Rule.DefaultRetention.Years = validity - } - - return config, nil - } - - if mode == nil && validity == nil && unit == nil { - return config, nil - } - - return nil, fmt.Errorf("all of retention mode, validity and validity unit must be passed") -} - -// SetBucketObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. -func (c Client) SetBucketObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("object-lock", "") - - config, err := newObjectLockConfig(mode, validity, unit) - if err != nil { - return err - } - - configData, err := xml.Marshal(config) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(configData), - contentLength: int64(len(configData)), - contentMD5Base64: sumMD5Base64(configData), - contentSHA256Hex: sum256Hex(configData), - } - - // Execute PUT bucket object lock configuration. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// GetObjectLockConfig gets object lock configuration of given bucket. -func (c Client) GetObjectLockConfig(ctx context.Context, bucketName string) (objectLock string, mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", nil, nil, nil, err - } - - urlValues := make(url.Values) - urlValues.Set("object-lock", "") - - // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return "", nil, nil, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", nil, nil, nil, httpRespToErrorResponse(resp, bucketName, "") - } - } - config := &objectLockConfig{} - if err = xml.NewDecoder(resp.Body).Decode(config); err != nil { - return "", nil, nil, nil, err - } - - if config.Rule != nil { - mode = &config.Rule.DefaultRetention.Mode - if config.Rule.DefaultRetention.Days != nil { - validity = config.Rule.DefaultRetention.Days - days := Days - unit = &days - } else { - validity = config.Rule.DefaultRetention.Years - years := Years - unit = &years - } - return config.ObjectLockEnabled, mode, validity, unit, nil - } - return config.ObjectLockEnabled, nil, nil, nil, nil -} - -// GetBucketObjectLockConfig gets object lock configuration of given bucket. -func (c Client) GetBucketObjectLockConfig(ctx context.Context, bucketName string) (mode *RetentionMode, validity *uint, unit *ValidityUnit, err error) { - _, mode, validity, unit, err = c.GetObjectLockConfig(ctx, bucketName) - return mode, validity, unit, err -} - -// SetObjectLockConfig sets object lock configuration in given bucket. mode, validity and unit are either all set or all nil. 
-func (c Client) SetObjectLockConfig(ctx context.Context, bucketName string, mode *RetentionMode, validity *uint, unit *ValidityUnit) error { - return c.SetBucketObjectLockConfig(ctx, bucketName, mode, validity, unit) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-object-retention.go b/mantle/vendor/github.com/minio/minio-go/v7/api-object-retention.go deleted file mode 100644 index 54f2762d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-object-retention.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "fmt" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// objectRetention - object retention specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html -type objectRetention struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"Retention"` - Mode RetentionMode `xml:"Mode,omitempty"` - RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601" xml:"RetainUntilDate,omitempty"` -} - -func newObjectRetention(mode *RetentionMode, date *time.Time) (*objectRetention, error) { - objectRetention := &objectRetention{} - - if date != nil && !date.IsZero() { - objectRetention.RetainUntilDate = date - } - if mode != nil { - if !mode.IsValid() { - return nil, fmt.Errorf("invalid retention mode `%v`", mode) - } - objectRetention.Mode = *mode - } - - return objectRetention, nil -} - -// PutObjectRetentionOptions represents options specified by user for PutObject call -type PutObjectRetentionOptions struct { - GovernanceBypass bool - Mode *RetentionMode - RetainUntilDate *time.Time - VersionID string -} - -// PutObjectRetention sets object retention for a given object and versionID. -func (c Client) PutObjectRetention(ctx context.Context, bucketName, objectName string, opts PutObjectRetentionOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. 
- urlValues := make(url.Values) - urlValues.Set("retention", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - retention, err := newObjectRetention(opts.Mode, opts.RetainUntilDate) - if err != nil { - return err - } - - retentionData, err := xml.Marshal(retention) - if err != nil { - return err - } - - // Build headers. - headers := make(http.Header) - - if opts.GovernanceBypass { - // Set the bypass goverenance retention header - headers.Set(amzBypassGovernance, "true") - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: bytes.NewReader(retentionData), - contentLength: int64(len(retentionData)), - contentMD5Base64: sumMD5Base64(retentionData), - contentSHA256Hex: sum256Hex(retentionData), - customHeader: headers, - } - - // Execute PUT Object Retention. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return nil -} - -// GetObjectRetention gets retention of given object. -func (c Client) GetObjectRetention(ctx context.Context, bucketName, objectName, versionID string) (mode *RetentionMode, retainUntilDate *time.Time, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, nil, err - } - - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, nil, err - } - urlValues := make(url.Values) - urlValues.Set("retention", "") - if versionID != "" { - urlValues.Set("versionId", versionID) - } - // Execute GET on bucket to list objects. 
- resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return nil, nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - retention := &objectRetention{} - if err = xml.NewDecoder(resp.Body).Decode(retention); err != nil { - return nil, nil, err - } - - return &retention.Mode, retention.RetainUntilDate, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-object-tagging.go b/mantle/vendor/github.com/minio/minio-go/v7/api-object-tagging.go deleted file mode 100644 index 2709efcd..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-object-tagging.go +++ /dev/null @@ -1,157 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/tags" -) - -// PutObjectTaggingOptions holds an object version id -// to update tag(s) of a specific object version -type PutObjectTaggingOptions struct { - VersionID string -} - -// PutObjectTagging replaces or creates object tag(s) and can target -// a specific object version in a versioned bucket. -func (c Client) PutObjectTagging(ctx context.Context, bucketName, objectName string, otags *tags.Tags, opts PutObjectTaggingOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - reqBytes, err := xml.Marshal(otags) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: bytes.NewReader(reqBytes), - contentLength: int64(len(reqBytes)), - contentMD5Base64: sumMD5Base64(reqBytes), - } - - // Execute PUT to set a object tagging. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return nil -} - -// GetObjectTaggingOptions holds the object version ID -// to fetch the tagging key/value pairs -type GetObjectTaggingOptions struct { - VersionID string -} - -// GetObjectTagging fetches object tag(s) with options to target -// a specific object version in a versioned bucket. 
-func (c Client) GetObjectTagging(ctx context.Context, bucketName, objectName string, opts GetObjectTaggingOptions) (*tags.Tags, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - // Execute GET on object to get object tag(s) - resp, err := c.executeMethod(ctx, http.MethodGet, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - return tags.ParseObjectXML(resp.Body) -} - -// RemoveObjectTaggingOptions holds the version id of the object to remove -type RemoveObjectTaggingOptions struct { - VersionID string -} - -// RemoveObjectTagging removes object tag(s) with options to control a specific object -// version in a versioned bucket -func (c Client) RemoveObjectTagging(ctx context.Context, bucketName, objectName string, opts RemoveObjectTaggingOptions) error { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("tagging", "") - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - // Execute DELETE on object to remove object tag(s) - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp != nil { - // S3 returns "204 No content" after Object tag deletion. 
- if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - return err -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-presigned.go b/mantle/vendor/github.com/minio/minio-go/v7/api-presigned.go deleted file mode 100644 index 80c363da..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-presigned.go +++ /dev/null @@ -1,216 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "errors" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/signer" -) - -// presignURL - Returns a presigned URL for an input 'method'. -// Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c Client) presignURL(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - // Input validation. - if method == "" { - return nil, errInvalidArgument("method cannot be empty.") - } - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err = isValidExpiry(expires); err != nil { - return nil, err - } - - // Convert expires into seconds. 
- expireSeconds := int64(expires / time.Second) - reqMetadata := requestMetadata{ - presignURL: true, - bucketName: bucketName, - objectName: objectName, - expires: expireSeconds, - queryValues: reqParams, - } - - // Instantiate a new request. - // Since expires is set newRequest will presign the request. - var req *http.Request - if req, err = c.newRequest(ctx, method, reqMetadata); err != nil { - return nil, err - } - return req.URL, nil -} - -// PresignedGetObject - Returns a presigned URL to access an object -// data without credentials. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. Additionally you can override -// a set of response headers using the query parameters. -func (c Client) PresignedGetObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL(ctx, http.MethodGet, bucketName, objectName, expires, reqParams) -} - -// PresignedHeadObject - Returns a presigned URL to access -// object metadata without credentials. URL can have a maximum expiry -// of upto 7days or a minimum of 1sec. Additionally you can override -// a set of response headers using the query parameters. -func (c Client) PresignedHeadObject(ctx context.Context, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL(ctx, http.MethodHead, bucketName, objectName, expires, reqParams) -} - -// PresignedPutObject - Returns a presigned URL to upload an object -// without credentials. URL can have a maximum expiry of upto 7days -// or a minimum of 1sec. 
-func (c Client) PresignedPutObject(ctx context.Context, bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL(ctx, http.MethodPut, bucketName, objectName, expires, nil) -} - -// Presign - returns a presigned URL for any http method of your choice -// along with custom request params. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. -func (c Client) Presign(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - return c.presignURL(ctx, method, bucketName, objectName, expires, reqParams) -} - -// PresignedPostPolicy - Returns POST urlString, form data to upload an object. -func (c Client) PresignedPostPolicy(ctx context.Context, p *PostPolicy) (u *url.URL, formData map[string]string, err error) { - // Validate input arguments. - if p.expiration.IsZero() { - return nil, nil, errors.New("Expiration time must be specified") - } - if _, ok := p.formData["key"]; !ok { - return nil, nil, errors.New("object key must be specified") - } - if _, ok := p.formData["bucket"]; !ok { - return nil, nil, errors.New("bucket name must be specified") - } - - bucketName := p.formData["bucket"] - // Fetch the bucket location. - location, err := c.getBucketLocation(ctx, bucketName) - if err != nil { - return nil, nil, err - } - - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) - - u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) - if err != nil { - return nil, nil, err - } - - // Get credentials from the configured credentials provider. 
- credValues, err := c.credsProvider.Get() - if err != nil { - return nil, nil, err - } - - var ( - signerType = credValues.SignerType - sessionToken = credValues.SessionToken - accessKeyID = credValues.AccessKeyID - secretAccessKey = credValues.SecretAccessKey - ) - - if signerType.IsAnonymous() { - return nil, nil, errInvalidArgument("Presigned operations are not supported for anonymous credentials") - } - - // Keep time. - t := time.Now().UTC() - // For signature version '2' handle here. - if signerType.IsV2() { - policyBase64 := p.base64() - p.formData["policy"] = policyBase64 - // For Google endpoint set this value to be 'GoogleAccessId'. - if s3utils.IsGoogleEndpoint(*c.endpointURL) { - p.formData["GoogleAccessId"] = accessKeyID - } else { - // For all other endpoints set this value to be 'AWSAccessKeyId'. - p.formData["AWSAccessKeyId"] = accessKeyID - } - // Sign the policy. - p.formData["signature"] = signer.PostPresignSignatureV2(policyBase64, secretAccessKey) - return u, p.formData, nil - } - - // Add date policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-date", - value: t.Format(iso8601DateFormat), - }); err != nil { - return nil, nil, err - } - - // Add algorithm policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-algorithm", - value: signV4Algorithm, - }); err != nil { - return nil, nil, err - } - - // Add a credential policy. - credential := signer.GetCredential(accessKeyID, location, t, signer.ServiceTypeS3) - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-credential", - value: credential, - }); err != nil { - return nil, nil, err - } - - if sessionToken != "" { - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-security-token", - value: sessionToken, - }); err != nil { - return nil, nil, err - } - } - - // Get base64 encoded policy. - policyBase64 := p.base64() - - // Fill in the form data. 
- p.formData["policy"] = policyBase64 - p.formData["x-amz-algorithm"] = signV4Algorithm - p.formData["x-amz-credential"] = credential - p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - if sessionToken != "" { - p.formData["x-amz-security-token"] = sessionToken - } - p.formData["x-amz-signature"] = signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) - return u, p.formData, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-put-bucket.go b/mantle/vendor/github.com/minio/minio-go/v7/api-put-bucket.go deleted file mode 100644 index df9fe98a..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-put-bucket.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "net/http" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -/// Bucket operations -func (c Client) makeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { - // Validate the input arguments. 
- if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { - return err - } - - err = c.doMakeBucket(ctx, bucketName, opts.Region, opts.ObjectLocking) - if err != nil && (opts.Region == "" || opts.Region == "us-east-1") { - if resp, ok := err.(ErrorResponse); ok && resp.Code == "AuthorizationHeaderMalformed" && resp.Region != "" { - err = c.doMakeBucket(ctx, bucketName, resp.Region, opts.ObjectLocking) - } - } - return err -} - -func (c Client) doMakeBucket(ctx context.Context, bucketName string, location string, objectLockEnabled bool) (err error) { - defer func() { - // Save the location into cache on a successful makeBucket response. - if err == nil { - c.bucketLocCache.Set(bucketName, location) - } - }() - - // If location is empty, treat is a default region 'us-east-1'. - if location == "" { - location = "us-east-1" - // For custom region clients, default - // to custom region instead not 'us-east-1'. - if c.region != "" { - location = c.region - } - } - // PUT bucket request metadata. - reqMetadata := requestMetadata{ - bucketName: bucketName, - bucketLocation: location, - } - - if objectLockEnabled { - headers := make(http.Header) - headers.Add("x-amz-bucket-object-lock-enabled", "true") - reqMetadata.customHeader = headers - } - - // If location is not 'us-east-1' create bucket location config. - if location != "us-east-1" && location != "" { - createBucketConfig := createBucketConfiguration{} - createBucketConfig.Location = location - var createBucketConfigBytes []byte - createBucketConfigBytes, err = xml.Marshal(createBucketConfig) - if err != nil { - return err - } - reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) - reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) - reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) - reqMetadata.contentLength = int64(len(createBucketConfigBytes)) - } - - // Execute PUT to create a new bucket. 
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Success. - return nil -} - -// MakeBucketOptions holds all options to tweak bucket creation -type MakeBucketOptions struct { - // Bucket location - Region string - // Enable object locking - ObjectLocking bool -} - -// MakeBucket creates a new bucket with bucketName with a context to control cancellations and timeouts. -// -// Location is an optional argument, by default all buckets are -// created in US Standard Region. -// -// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html -// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucket(ctx context.Context, bucketName string, opts MakeBucketOptions) (err error) { - return c.makeBucket(ctx, bucketName, opts) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-common.go b/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-common.go deleted file mode 100644 index f1653afe..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-common.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "math" - "os" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// Verify if reader is *minio.Object -func isObject(reader io.Reader) (ok bool) { - _, ok = reader.(*Object) - return -} - -// Verify if reader is a generic ReaderAt -func isReadAt(reader io.Reader) (ok bool) { - var v *os.File - v, ok = reader.(*os.File) - if ok { - // Stdin, Stdout and Stderr all have *os.File type - // which happen to also be io.ReaderAt compatible - // we need to add special conditions for them to - // be ignored by this function. - for _, f := range []string{ - "/dev/stdin", - "/dev/stdout", - "/dev/stderr", - } { - if f == v.Name() { - ok = false - break - } - } - } else { - _, ok = reader.(io.ReaderAt) - } - return -} - -// OptimalPartInfo - calculate the optimal part info for a given -// object size. -// -// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible -// object storage it will have the following parameters as constants. -// -// maxPartsCount - 10000 -// minPartSize - 16MiB -// maxMultipartPutObjectSize - 5TiB -// -func OptimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { - // object size is '-1' set it to 5TiB. - var unknownSize bool - if objectSize == -1 { - unknownSize = true - objectSize = maxMultipartPutObjectSize - } - - // object size is larger than supported maximum. 
- if objectSize > maxMultipartPutObjectSize { - err = errEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") - return - } - - var partSizeFlt float64 - if configuredPartSize > 0 { - if int64(configuredPartSize) > objectSize { - err = errEntityTooLarge(int64(configuredPartSize), objectSize, "", "") - return - } - - if !unknownSize { - if objectSize > (int64(configuredPartSize) * maxPartsCount) { - err = errInvalidArgument("Part size * max_parts(10000) is lesser than input objectSize.") - return - } - } - - if configuredPartSize < absMinPartSize { - err = errInvalidArgument("Input part size is smaller than allowed minimum of 5MiB.") - return - } - - if configuredPartSize > maxPartSize { - err = errInvalidArgument("Input part size is bigger than allowed maximum of 5GiB.") - return - } - - partSizeFlt = float64(configuredPartSize) - if unknownSize { - // If input has unknown size and part size is configured - // keep it to maximum allowed as per 10000 parts. - objectSize = int64(configuredPartSize) * maxPartsCount - } - } else { - configuredPartSize = minPartSize - // Use floats for part size for all calculations to avoid - // overflows during float64 to int64 conversions. - partSizeFlt = float64(objectSize / maxPartsCount) - partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) - } - - // Total parts count. - totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) - // Part size. - partSize = int64(partSizeFlt) - // Last part size. - lastPartSize = objectSize - int64(totalPartsCount-1)*partSize - return totalPartsCount, partSize, lastPartSize, nil -} - -// getUploadID - fetch upload id if already present for an object name -// or initiate a new request to fetch a new upload id. -func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return "", err - } - - // Initiate multipart upload for an object. - initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) - if err != nil { - return "", err - } - return initMultipartUploadResult.UploadID, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go b/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go deleted file mode 100644 index 6c0f20df..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-file-context.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "mime" - "os" - "path/filepath" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// FPutObject - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. -func (c Client) FPutObject(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Open the referenced file. - fileReader, err := os.Open(filePath) - // If any error fail quickly here. - if err != nil { - return UploadInfo{}, err - } - defer fileReader.Close() - - // Save the file stat. - fileStat, err := fileReader.Stat() - if err != nil { - return UploadInfo{}, err - } - - // Save the file size. - fileSize := fileStat.Size() - - // Set contentType based on filepath extension if not given or default - // value of "application/octet-stream" if the extension has no associated type. - if opts.ContentType == "" { - if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { - opts.ContentType = "application/octet-stream" - } - } - return c.PutObject(ctx, bucketName, objectName, fileReader, fileSize, opts) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go deleted file mode 100644 index 873ec387..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ /dev/null @@ -1,394 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - "github.com/google/uuid" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, - opts PutObjectOptions) (info UploadInfo, err error) { - info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - } - return info, err -} - -func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. 
- totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. - partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - - for partNumber <= totalPartsCount { - // Choose hash algorithms to be calculated by hashCopyN, - // avoid sha256 with non-v4 signature request or - // HTTPS connection. - hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5) - - length, rErr := readFull(reader, buf) - if rErr == io.EOF && partNumber > 1 { - break - } - - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return UploadInfo{}, rErr - } - - // Calculates hash sums while copying partSize bytes into cw. - for k, v := range hashAlgos { - v.Write(buf[:length]) - hashSums[k] = v.Sum(nil) - v.Close() - } - - // Update progress reader appropriately to the latest offset - // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Checksums.. - var ( - md5Base64 string - sha256Hex string - ) - if hashSums["md5"] != nil { - md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) - } - if hashSums["sha256"] != nil { - sha256Hex = hex.EncodeToString(hashSums["sha256"]) - } - - // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) - if uerr != nil { - return UploadInfo{}, uerr - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. 
- totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rErr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} - -// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return initiateMultipartUploadResult{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return initiateMultipartUploadResult{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploads", "") - - if opts.Internal.SourceVersionID != "" { - if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { - return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) - } - urlValues.Set("versionId", opts.Internal.SourceVersionID) - } - - // Set ContentType header. 
- customHeader := opts.Header() - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - } - - // Execute POST on an objectName to initiate multipart upload. - resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) - defer closeResponse(resp) - if err != nil { - return initiateMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode xml for new multipart upload. - initiateMultipartUploadResult := initiateMultipartUploadResult{} - err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) - if err != nil { - return initiateMultipartUploadResult, err - } - return initiateMultipartUploadResult, nil -} - -// uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectPart{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectPart{}, err - } - if size > maxPartSize { - return ObjectPart{}, errEntityTooLarge(size, maxPartSize, bucketName, objectName) - } - if size <= -1 { - return ObjectPart{}, errEntityTooSmall(size, bucketName, objectName) - } - if partNumber <= 0 { - return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") - } - if uploadID == "" { - return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") - } - - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number. - urlValues.Set("partNumber", strconv.Itoa(partNumber)) - // Set upload id. 
- urlValues.Set("uploadId", uploadID) - - // Set encryption headers, if any. - customHeader := make(http.Header) - // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html - // Server-side encryption is supported by the S3 Multipart Upload actions. - // Unless you are using a customer-provided encryption key, you don't need - // to specify the encryption parameters in each UploadPart request. - if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - } - - // Execute PUT on each part. - resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectPart{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Once successfully uploaded, return completed part. - objPart := ObjectPart{} - objPart.Size = size - objPart.PartNumber = partNumber - // Trim off the odd double quotes from ETag in the beginning and end. - objPart.ETag = trimEtag(resp.Header.Get("ETag")) - return objPart, nil -} - -// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, - complete completeMultipartUpload, opts PutObjectOptions) (UploadInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - // Marshal complete multipart body. 
- completeMultipartUploadBytes, err := xml.Marshal(complete) - if err != nil { - return UploadInfo{}, err - } - - // Instantiate all the complete multipart buffer. - completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: completeMultipartUploadBuffer, - contentLength: int64(len(completeMultipartUploadBytes)), - contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), - customHeader: opts.Header(), - } - - // Execute POST to complete multipart upload for an objectName. - resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) - defer closeResponse(resp) - if err != nil { - return UploadInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Read resp.Body into a []bytes to parse for Error response inside the body - var b []byte - b, err = ioutil.ReadAll(resp.Body) - if err != nil { - return UploadInfo{}, err - } - // Decode completed multipart upload response on success. - completeMultipartUploadResult := completeMultipartUploadResult{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return UploadInfo{}, err - } else if completeMultipartUploadResult.Bucket == "" { - // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. - // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values - // of the members. 
- - // Decode completed multipart upload response on failure - completeMultipartUploadErr := ErrorResponse{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return UploadInfo{}, err - } - return UploadInfo{}, completeMultipartUploadErr - } - - // extract lifecycle expiry date and rule ID - expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) - - return UploadInfo{ - Bucket: completeMultipartUploadResult.Bucket, - Key: completeMultipartUploadResult.Key, - ETag: trimEtag(completeMultipartUploadResult.ETag), - VersionID: resp.Header.Get(amzVersionID), - Location: completeMultipartUploadResult.Location, - Expiration: expTime, - ExpirationRuleID: ruleID, - }, nil - -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go deleted file mode 100644 index f1cc9fbb..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ /dev/null @@ -1,487 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strings" - - "github.com/google/uuid" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// putObjectMultipartStream - upload a large object using -// multipart upload and streaming signature for signing payload. -// Comprehensive put object operation involving multipart uploads. -// -// Following code handles these types of readers. -// -// - *minio.Object -// - Any reader which has a method 'ReadAt()' -// -func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - - if !isObject(reader) && isReadAt(reader) && !opts.SendContentMd5 { - // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. - info, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) - } else { - info, err = c.putObjectMultipartStreamOptionalChecksum(ctx, bucketName, objectName, reader, size, opts) - } - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - } - return info, err -} - -// uploadedPartRes - the response received from a part upload. -type uploadedPartRes struct { - Error error // Any error encountered while uploading the part. - PartNum int // Number of the part uploaded. - Size int64 // Size of the part uploaded. 
- Part ObjectPart -} - -type uploadPartReq struct { - PartNum int // Number of the part uploaded. - Part ObjectPart // Size of the part uploaded. -} - -// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB. -// Supports all readers which implements io.ReaderAt interface -// (ReadAt method). -// -// NOTE: This function is meant to be used for all readers which -// implement io.ReaderAt which allows us for resuming multipart -// uploads but reading at an offset, which would avoid re-read the -// data which was already uploaded. Internally this function uses -// temporary files for staging all the data, these temporary files are -// cleaned automatically when the caller i.e http client closes the -// stream after uploading all the contents successfully. -func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, - reader io.ReaderAt, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - - // Aborts the multipart upload in progress, if the - // function returns any error, since we do not resume - // we should purge the parts which have been uploaded - // to relinquish storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Total data read and written to server. should be equal to 'size' at the end of the call. 
- var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Declare a channel that sends the next part number to be uploaded. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadPartsCh := make(chan uploadPartReq, 10000) - - // Declare a channel that sends back the response of a part upload. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadedPartsCh := make(chan uploadedPartRes, 10000) - - // Used for readability, lastPartNumber is always totalPartsCount. - lastPartNumber := totalPartsCount - - // Send each part number to the channel to be processed. - for p := 1; p <= totalPartsCount; p++ { - uploadPartsCh <- uploadPartReq{PartNum: p} - } - close(uploadPartsCh) - - var partsBuf = make([][]byte, opts.getNumThreads()) - for i := range partsBuf { - partsBuf[i] = make([]byte, 0, partSize) - } - - // Receive each part number from the channel allowing three parallel uploads. - for w := 1; w <= opts.getNumThreads(); w++ { - go func(w int, partSize int64) { - // Each worker will draw from the part channel and upload in parallel. - for uploadReq := range uploadPartsCh { - - // If partNumber was not uploaded we calculate the missing - // part offset and size. For all other part numbers we - // calculate offset based on multiples of partSize. - readOffset := int64(uploadReq.PartNum-1) * partSize - - // As a special case if partNumber is lastPartNumber, we - // calculate the offset based on the last part size. - if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) - partSize = lastPartSize - } - - n, rerr := readFull(io.NewSectionReader(reader, readOffset, partSize), partsBuf[w-1][:partSize]) - if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { - uploadedPartsCh <- uploadedPartRes{ - Error: rerr, - } - // Exit the goroutine. - return - } - - // Get a section reader on a particular offset. 
- hookReader := newHook(bytes.NewReader(partsBuf[w-1][:n]), opts.Progress) - - // Proceed to upload the part. - objPart, err := c.uploadPart(ctx, bucketName, objectName, - uploadID, hookReader, uploadReq.PartNum, - "", "", partSize, opts.ServerSideEncryption) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Error: err, - } - // Exit the goroutine. - return - } - - // Save successfully uploaded part metadata. - uploadReq.Part = objPart - - // Send successful part info through the channel. - uploadedPartsCh <- uploadedPartRes{ - Size: objPart.Size, - PartNum: uploadReq.PartNum, - Part: uploadReq.Part, - } - } - }(w, partSize) - } - - // Gather the responses as they occur and update any - // progress bar. - for u := 1; u <= totalPartsCount; u++ { - uploadRes := <-uploadedPartsCh - if uploadRes.Error != nil { - return UploadInfo{}, uploadRes.Error - } - // Update the totalUploadedSize. - totalUploadedSize += uploadRes.Size - // Store the parts to be completed in order. - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: uploadRes.Part.ETag, - PartNumber: uploadRes.Part.PartNumber, - }) - } - - // Verify if we uploaded all the data. - if totalUploadedSize != size { - return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} - -func (c Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. 
- if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := OptimalPartInfo(size, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - // Initiates a new multipart request - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - - // Aborts the multipart upload if the function returns - // any error, since we do not resume we should purge - // the parts which have been uploaded to relinquish - // storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - - // Avoid declaring variables in the for loop - var md5Base64 string - var hookReader io.Reader - - // Part number always starts with '1'. - var partNumber int - for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { - - // Proceed to upload the part. - if partNumber == totalPartsCount { - partSize = lastPartSize - } - - if opts.SendContentMd5 { - length, rerr := readFull(reader, buf) - if rerr == io.EOF && partNumber > 1 { - break - } - - if rerr != nil && rerr != io.ErrUnexpectedEOF && err != io.EOF { - return UploadInfo{}, rerr - } - - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) - md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - hash.Close() - - // Update progress reader appropriately to the latest offset - // as we read from the source. 
- hookReader = newHook(bytes.NewReader(buf[:length]), opts.Progress) - } else { - // Update progress reader appropriately to the latest offset - // as we read from the source. - hookReader = newHook(reader, opts.Progress) - } - - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, - io.LimitReader(hookReader, partSize), - partNumber, md5Base64, "", partSize, opts.ServerSideEncryption) - if uerr != nil { - return UploadInfo{}, uerr - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += partSize - } - - // Verify if we uploaded all the data. - if size > 0 { - if totalUploadedSize != size { - return UploadInfo{}, errUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - } - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} - -// putObject special function used Google Cloud Storage. This special function -// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObject(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Size -1 is only supported on Google Cloud Storage, we error - // out in all other situations. - if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { - return UploadInfo{}, errEntityTooSmall(size, bucketName, objectName) - } - - if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { - return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") - } - - if size > 0 { - if isReadAt(reader) && !isObject(reader) { - seeker, ok := reader.(io.Seeker) - if ok { - offset, err := seeker.Seek(0, io.SeekCurrent) - if err != nil { - return UploadInfo{}, errInvalidArgument(err.Error()) - } - reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) - } - } - } - - var md5Base64 string - if opts.SendContentMd5 { - // Create a buffer. - buf := make([]byte, size) - - length, rErr := readFull(reader, buf) - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return UploadInfo{}, rErr - } - - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) - md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - reader = bytes.NewReader(buf[:length]) - hash.Close() - } - - // Update progress reader appropriately to the latest offset as we - // read from the source. - readSeeker := newHook(reader, opts.Progress) - - // This function does not calculate sha256 and md5sum for payload. - // Execute put object. - return c.putObjectDo(ctx, bucketName, objectName, readSeeker, md5Base64, "", size, opts) -} - -// putObjectDo - executes the put object http operation. -// NOTE: You must have WRITE permissions on a bucket to add an object to it. 
-func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (UploadInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - // Set headers. - customHeader := opts.Header() - - // Populate request metadata. - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - } - if opts.Internal.SourceVersionID != "" { - if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { - return UploadInfo{}, errInvalidArgument(err.Error()) - } - urlValues := make(url.Values) - urlValues.Set("versionId", opts.Internal.SourceVersionID) - reqMetadata.queryValues = urlValues - } - - // Execute PUT an objectName. 
- resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) - defer closeResponse(resp) - if err != nil { - return UploadInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // extract lifecycle expiry date and rule ID - expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) - - return UploadInfo{ - Bucket: bucketName, - Key: objectName, - ETag: trimEtag(resp.Header.Get("ETag")), - VersionID: resp.Header.Get(amzVersionID), - Size: size, - Expiration: expTime, - ExpirationRuleID: ruleID, - }, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object.go b/mantle/vendor/github.com/minio/minio-go/v7/api-put-object.go deleted file mode 100644 index b89f96d5..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ /dev/null @@ -1,370 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "fmt" - "io" - "net/http" - "sort" - "time" - - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" - "golang.org/x/net/http/httpguts" -) - -// ReplicationStatus represents replication status of object -type ReplicationStatus string - -const ( - // ReplicationStatusPending indicates replication is pending - ReplicationStatusPending ReplicationStatus = "PENDING" - // ReplicationStatusComplete indicates replication completed ok - ReplicationStatusComplete ReplicationStatus = "COMPLETE" - // ReplicationStatusFailed indicates replication failed - ReplicationStatusFailed ReplicationStatus = "FAILED" - // ReplicationStatusReplica indicates object is a replica of a source - ReplicationStatusReplica ReplicationStatus = "REPLICA" -) - -// Empty returns true if no replication status set. -func (r ReplicationStatus) Empty() bool { - return r == "" -} - -// AdvancedPutOptions for internal use - to be utilized by replication, ILM transition -// implementation on MinIO server -type AdvancedPutOptions struct { - SourceVersionID string - SourceETag string - ReplicationStatus ReplicationStatus - SourceMTime time.Time - ReplicationRequest bool -} - -// PutObjectOptions represents options specified by user for PutObject call -type PutObjectOptions struct { - UserMetadata map[string]string - UserTags map[string]string - Progress io.Reader - ContentType string - ContentEncoding string - ContentDisposition string - ContentLanguage string - CacheControl string - Mode RetentionMode - RetainUntilDate time.Time - ServerSideEncryption encrypt.ServerSide - NumThreads uint - StorageClass string - WebsiteRedirectLocation string - PartSize uint64 - LegalHold LegalHoldStatus - SendContentMd5 bool - DisableMultipart bool - Internal AdvancedPutOptions -} - -// getNumThreads - gets the number of threads to be used in the multipart -// put object operation -func (opts 
PutObjectOptions) getNumThreads() (numThreads int) { - if opts.NumThreads > 0 { - numThreads = int(opts.NumThreads) - } else { - numThreads = totalWorkers - } - return -} - -// Header - constructs the headers from metadata entered by user in -// PutObjectOptions struct -func (opts PutObjectOptions) Header() (header http.Header) { - header = make(http.Header) - - contentType := opts.ContentType - if contentType == "" { - contentType = "application/octet-stream" - } - header.Set("Content-Type", contentType) - - if opts.ContentEncoding != "" { - header.Set("Content-Encoding", opts.ContentEncoding) - } - if opts.ContentDisposition != "" { - header.Set("Content-Disposition", opts.ContentDisposition) - } - if opts.ContentLanguage != "" { - header.Set("Content-Language", opts.ContentLanguage) - } - if opts.CacheControl != "" { - header.Set("Cache-Control", opts.CacheControl) - } - - if opts.Mode != "" { - header.Set(amzLockMode, opts.Mode.String()) - } - - if !opts.RetainUntilDate.IsZero() { - header.Set("X-Amz-Object-Lock-Retain-Until-Date", opts.RetainUntilDate.Format(time.RFC3339)) - } - - if opts.LegalHold != "" { - header.Set(amzLegalHoldHeader, opts.LegalHold.String()) - } - - if opts.ServerSideEncryption != nil { - opts.ServerSideEncryption.Marshal(header) - } - - if opts.StorageClass != "" { - header.Set(amzStorageClass, opts.StorageClass) - } - - if opts.WebsiteRedirectLocation != "" { - header.Set(amzWebsiteRedirectLocation, opts.WebsiteRedirectLocation) - } - - if !opts.Internal.ReplicationStatus.Empty() { - header.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) - } - if !opts.Internal.SourceMTime.IsZero() { - header.Set(minIOBucketSourceMTime, opts.Internal.SourceMTime.Format(time.RFC3339Nano)) - } - if opts.Internal.SourceETag != "" { - header.Set(minIOBucketSourceETag, opts.Internal.SourceETag) - } - if opts.Internal.ReplicationRequest { - header.Set(minIOBucketReplicationRequest, "") - } - if len(opts.UserTags) != 0 { - 
header.Set(amzTaggingHeader, s3utils.TagEncode(opts.UserTags)) - } - - for k, v := range opts.UserMetadata { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { - header.Set(k, v) - } else { - header.Set("x-amz-meta-"+k, v) - } - } - return -} - -// validate() checks if the UserMetadata map has standard headers or and raises an error if so. -func (opts PutObjectOptions) validate() (err error) { - for k, v := range opts.UserMetadata { - if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { - return errInvalidArgument(k + " unsupported user defined metadata name") - } - if !httpguts.ValidHeaderFieldValue(v) { - return errInvalidArgument(v + " unsupported user defined metadata value") - } - } - if opts.Mode != "" && !opts.Mode.IsValid() { - return errInvalidArgument(opts.Mode.String() + " unsupported retention mode") - } - if opts.LegalHold != "" && !opts.LegalHold.IsValid() { - return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") - } - return nil -} - -// completedParts is a collection of parts sortable by their part numbers. -// used for sorting the uploaded parts before completing the multipart request. -type completedParts []CompletePart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } - -// PutObject creates an object in a bucket. -// -// You must have WRITE permissions on a bucket to create an object. -// -// - For size smaller than 128MiB PutObject automatically does a -// single atomic Put operation. -// - For size larger than 128MiB PutObject automatically does a -// multipart Put operation. -// - For size input as -1 PutObject does a multipart Put operation -// until input stream reaches EOF. Maximum object size that can -// be uploaded through this operation will be 5TiB. 
-func (c Client) PutObject(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions) (info UploadInfo, err error) { - if objectSize < 0 && opts.DisableMultipart { - return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") - } - - err = opts.validate() - if err != nil { - return UploadInfo{}, err - } - - return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) -} - -func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (info UploadInfo, err error) { - // Check for largest object size allowed. - if size > int64(maxMultipartPutObjectSize) { - return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) - } - - // NOTE: Streaming signature is not supported by GCS. - if s3utils.IsGoogleEndpoint(*c.endpointURL) { - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - - partSize := opts.PartSize - if opts.PartSize == 0 { - partSize = minPartSize - } - - if c.overrideSignerType.IsV2() { - if size >= 0 && size < int64(partSize) || opts.DisableMultipart { - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) - } - - if size < 0 { - return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) - } - - if size < int64(partSize) || opts.DisableMultipart { - return c.putObject(ctx, bucketName, objectName, reader, size, opts) - } - - return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) -} - -func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { - // Input validation. 
- if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return UploadInfo{}, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return UploadInfo{}, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) - if err != nil { - return UploadInfo{}, err - } - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return UploadInfo{}, err - } - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. - partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - - for partNumber <= totalPartsCount { - length, rerr := readFull(reader, buf) - if rerr == io.EOF && partNumber > 1 { - break - } - - if rerr != nil && rerr != io.ErrUnexpectedEOF && rerr != io.EOF { - return UploadInfo{}, rerr - } - - var md5Base64 string - if opts.SendContentMd5 { - // Calculate md5sum. - hash := c.md5Hasher() - hash.Write(buf[:length]) - md5Base64 = base64.StdEncoding.EncodeToString(hash.Sum(nil)) - hash.Close() - } - - // Update progress reader appropriately to the latest offset - // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Proceed to upload the part. - objPart, uerr := c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, "", int64(length), opts.ServerSideEncryption) - if uerr != nil { - return UploadInfo{}, uerr - } - - // Save successfully uploaded part metadata. 
- partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rerr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - - uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, PutObjectOptions{}) - if err != nil { - return UploadInfo{}, err - } - - uploadInfo.Size = totalUploadedSize - return uploadInfo, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-remove.go b/mantle/vendor/github.com/minio/minio-go/v7/api-remove.go deleted file mode 100644 index f21a72c9..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-remove.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "io" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// RemoveBucket deletes the bucket name. -// -// All objects (including all object versions and delete markers). -// in the bucket must be deleted before successfully attempting this request. -func (c Client) RemoveBucket(ctx context.Context, bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Execute DELETE on bucket. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Remove the location from cache on a successful delete. - c.bucketLocCache.Delete(bucketName) - - return nil -} - -// AdvancedRemoveOptions intended for internal use by replication -type AdvancedRemoveOptions struct { - ReplicationDeleteMarker bool - ReplicationStatus ReplicationStatus - ReplicationMTime time.Time - ReplicationRequest bool -} - -// RemoveObjectOptions represents options specified by user for RemoveObject call -type RemoveObjectOptions struct { - GovernanceBypass bool - VersionID string - Internal AdvancedRemoveOptions -} - -// RemoveObject removes an object from a bucket. -func (c Client) RemoveObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { - // Input validation. 
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - return c.removeObject(ctx, bucketName, objectName, opts) -} - -func (c Client) removeObject(ctx context.Context, bucketName, objectName string, opts RemoveObjectOptions) error { - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - - // Build headers. - headers := make(http.Header) - - if opts.GovernanceBypass { - // Set the bypass goverenance retention header - headers.Set(amzBypassGovernance, "true") - } - if opts.Internal.ReplicationDeleteMarker { - headers.Set(minIOBucketReplicationDeleteMarker, "true") - } - if !opts.Internal.ReplicationMTime.IsZero() { - headers.Set(minIOBucketSourceMTime, opts.Internal.ReplicationMTime.Format(time.RFC3339Nano)) - } - if !opts.Internal.ReplicationStatus.Empty() { - headers.Set(amzBucketReplicationStatus, string(opts.Internal.ReplicationStatus)) - } - if opts.Internal.ReplicationRequest { - headers.Set(minIOBucketReplicationRequest, "") - } - // Execute DELETE on objectName. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - contentSHA256Hex: emptySHA256Hex, - queryValues: urlValues, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - // if some unexpected error happened and max retry is reached, we want to let client know - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // DeleteObject always responds with http '204' even for - // objects which do not exist. So no need to handle them - // specifically. 
- return nil -} - -// RemoveObjectError - container of Multi Delete S3 API error -type RemoveObjectError struct { - ObjectName string - VersionID string - Err error -} - -// generateRemoveMultiObjects - generate the XML request for remove multi objects request -func generateRemoveMultiObjectsRequest(objects []ObjectInfo) []byte { - delObjects := []deleteObject{} - for _, obj := range objects { - delObjects = append(delObjects, deleteObject{ - Key: obj.Key, - VersionID: obj.VersionID, - }) - } - xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: delObjects, Quiet: true}) - return xmlBytes -} - -// processRemoveMultiObjectsResponse - parse the remove multi objects web service -// and return the success/failure result status for each object -func processRemoveMultiObjectsResponse(body io.Reader, objects []ObjectInfo, errorCh chan<- RemoveObjectError) { - // Parse multi delete XML response - rmResult := &deleteMultiObjectsResult{} - err := xmlDecoder(body, rmResult) - if err != nil { - errorCh <- RemoveObjectError{ObjectName: "", Err: err} - return - } - - // Fill deletion that returned an error. - for _, obj := range rmResult.UnDeletedObjects { - // Version does not exist is not an error ignore and continue. - switch obj.Code { - case "InvalidArgument", "NoSuchVersion": - continue - } - errorCh <- RemoveObjectError{ - ObjectName: obj.Key, - VersionID: obj.VersionID, - Err: ErrorResponse{ - Code: obj.Code, - Message: obj.Message, - }, - } - } -} - -// RemoveObjectsOptions represents options specified by user for RemoveObjects call -type RemoveObjectsOptions struct { - GovernanceBypass bool -} - -// RemoveObjects removes multiple objects from a bucket while -// it is possible to specify objects versions which are received from -// objectsCh. Remove failures are sent back via error channel. 
-func (c Client) RemoveObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, opts RemoveObjectsOptions) <-chan RemoveObjectError { - errorCh := make(chan RemoveObjectError, 1) - - // Validate if bucket name is valid. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(errorCh) - errorCh <- RemoveObjectError{ - Err: err, - } - return errorCh - } - // Validate objects channel to be properly allocated. - if objectsCh == nil { - defer close(errorCh) - errorCh <- RemoveObjectError{ - Err: errInvalidArgument("Objects channel cannot be nil"), - } - return errorCh - } - - go c.removeObjects(ctx, bucketName, objectsCh, errorCh, opts) - return errorCh -} - -// Return true if the character is within the allowed characters in an XML 1.0 document -// The list of allowed characters can be found here: https://www.w3.org/TR/xml/#charsets -func validXMLChar(r rune) (ok bool) { - return r == 0x09 || - r == 0x0A || - r == 0x0D || - r >= 0x20 && r <= 0xD7FF || - r >= 0xE000 && r <= 0xFFFD || - r >= 0x10000 && r <= 0x10FFFF -} - -func hasInvalidXMLChar(str string) bool { - for _, s := range str { - if !validXMLChar(s) { - return true - } - } - return false -} - -// Generate and call MultiDelete S3 requests based on entries received from objectsCh -func (c Client) removeObjects(ctx context.Context, bucketName string, objectsCh <-chan ObjectInfo, errorCh chan<- RemoveObjectError, opts RemoveObjectsOptions) { - maxEntries := 1000 - finish := false - urlValues := make(url.Values) - urlValues.Set("delete", "") - - // Close error channel when Multi delete finishes. - defer close(errorCh) - - // Loop over entries by 1000 and call MultiDelete requests - for { - if finish { - break - } - count := 0 - var batch []ObjectInfo - - // Try to gather 1000 entries - for object := range objectsCh { - if hasInvalidXMLChar(object.Key) { - // Use single DELETE so the object name will be in the request URL instead of the multi-delete XML document. 
- err := c.removeObject(ctx, bucketName, object.Key, RemoveObjectOptions{ - VersionID: object.VersionID, - GovernanceBypass: opts.GovernanceBypass, - }) - if err != nil { - // Version does not exist is not an error ignore and continue. - switch ToErrorResponse(err).Code { - case "InvalidArgument", "NoSuchVersion": - continue - } - errorCh <- RemoveObjectError{ - ObjectName: object.Key, - VersionID: object.VersionID, - Err: err, - } - } - continue - } - - batch = append(batch, object) - if count++; count >= maxEntries { - break - } - } - if count == 0 { - // Multi Objects Delete API doesn't accept empty object list, quit immediately - break - } - if count < maxEntries { - // We didn't have 1000 entries, so this is the last batch - finish = true - } - - // Build headers. - headers := make(http.Header) - if opts.GovernanceBypass { - // Set the bypass goverenance retention header - headers.Set(amzBypassGovernance, "true") - } - - // Generate remove multi objects XML request - removeBytes := generateRemoveMultiObjectsRequest(batch) - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(removeBytes), - contentLength: int64(len(removeBytes)), - contentMD5Base64: sumMD5Base64(removeBytes), - contentSHA256Hex: sum256Hex(removeBytes), - customHeader: headers, - }) - if resp != nil { - if resp.StatusCode != http.StatusOK { - e := httpRespToErrorResponse(resp, bucketName, "") - errorCh <- RemoveObjectError{ObjectName: "", Err: e} - } - } - if err != nil { - for _, b := range batch { - errorCh <- RemoveObjectError{ - ObjectName: b.Key, - VersionID: b.VersionID, - Err: err, - } - } - continue - } - - // Process multiobjects remove xml response - processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) - - closeResponse(resp) - } -} - -// RemoveIncompleteUpload aborts an partially uploaded object. 
-func (c Client) RemoveIncompleteUpload(ctx context.Context, bucketName, objectName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - // Find multipart upload ids of the object to be aborted. - uploadIDs, err := c.findUploadIDs(ctx, bucketName, objectName) - if err != nil { - return err - } - - for _, uploadID := range uploadIDs { - // abort incomplete multipart upload, based on the upload id passed. - err := c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - if err != nil { - return err - } - } - - return nil -} - -// abortMultipartUpload aborts a multipart upload for the given -// uploadID, all previously uploaded parts are deleted. -func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - - // Execute DELETE on multipart upload. - resp, err := c.executeMethod(ctx, http.MethodDelete, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - // Abort has no response body, handle it for any errors. - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - // This is needed specifically for abort and it cannot - // be converged into default case. 
- errorResponse = ErrorResponse{ - Code: "NoSuchUpload", - Message: "The specified multipart upload does not exist.", - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - default: - return httpRespToErrorResponse(resp, bucketName, objectName) - } - return errorResponse - } - } - return nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/mantle/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go deleted file mode 100644 index 37ed97b7..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ /dev/null @@ -1,361 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "errors" - "io" - "reflect" - "time" -) - -// listAllMyBucketsResult container for listBuckets response. -type listAllMyBucketsResult struct { - // Container for one or more buckets. - Buckets struct { - Bucket []BucketInfo - } - Owner owner -} - -// owner container for bucket owner information. -type owner struct { - DisplayName string - ID string -} - -// CommonPrefix container for prefix response. -type CommonPrefix struct { - Prefix string -} - -// ListBucketV2Result container for listObjects response version 2. 
-type ListBucketV2Result struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. - CommonPrefixes []CommonPrefix - // Metadata about each object returned. - Contents []ObjectInfo - Delimiter string - - // Encoding type used to encode object keys in the response. - EncodingType string - - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - MaxKeys int64 - Name string - - // Hold the token that will be sent in the next request to fetch the next group of keys - NextContinuationToken string - - ContinuationToken string - Prefix string - - // FetchOwner and StartAfter are currently not used - FetchOwner string - StartAfter string -} - -// Version is an element in the list object versions response -type Version struct { - ETag string - IsLatest bool - Key string - LastModified time.Time - Owner Owner - Size int64 - StorageClass string - VersionID string `xml:"VersionId"` - - isDeleteMarker bool -} - -// ListVersionsResult is an element in the list object versions response -// and has a special Unmarshaler because we need to preserver the order -// of and in ListVersionsResult.Versions slice -type ListVersionsResult struct { - Versions []Version - - CommonPrefixes []CommonPrefix - Name string - Prefix string - Delimiter string - MaxKeys int64 - EncodingType string - IsTruncated bool - KeyMarker string - VersionIDMarker string - NextKeyMarker string - NextVersionIDMarker string -} - -// UnmarshalXML is a custom unmarshal code for the response of ListObjectVersions, the custom -// code will unmarshal and tags and save them in Versions field to -// preserve the lexical order of the listing. -func (l *ListVersionsResult) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { - for { - // Read tokens from the XML document in a stream. 
- t, err := d.Token() - if err != nil { - if err == io.EOF { - break - } - return err - } - - switch se := t.(type) { - case xml.StartElement: - tagName := se.Name.Local - switch tagName { - case "Name", "Prefix", - "Delimiter", "EncodingType", - "KeyMarker", "NextKeyMarker": - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - v := reflect.ValueOf(l).Elem().FieldByName(tagName) - if v.IsValid() { - v.SetString(s) - } - case "VersionIdMarker": - // VersionIdMarker is a special case because of 'Id' instead of 'ID' in field name - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - l.VersionIDMarker = s - case "NextVersionIdMarker": - // NextVersionIdMarker is a special case because of 'Id' instead of 'ID' in field name - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - l.NextVersionIDMarker = s - case "IsTruncated": // bool - var b bool - if err = d.DecodeElement(&b, &se); err != nil { - return err - } - l.IsTruncated = b - case "MaxKeys": // int64 - var i int64 - if err = d.DecodeElement(&i, &se); err != nil { - return err - } - l.MaxKeys = i - case "CommonPrefixes": - var cp CommonPrefix - if err = d.DecodeElement(&cp, &se); err != nil { - return err - } - l.CommonPrefixes = append(l.CommonPrefixes, cp) - case "DeleteMarker", "Version": - var v Version - if err = d.DecodeElement(&v, &se); err != nil { - return err - } - if tagName == "DeleteMarker" { - v.isDeleteMarker = true - } - l.Versions = append(l.Versions, v) - default: - return errors.New("unrecognized option:" + tagName) - } - - } - } - return nil -} - -// ListBucketResult container for listObjects response. -type ListBucketResult struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. - CommonPrefixes []CommonPrefix - // Metadata about each object returned. - Contents []ObjectInfo - Delimiter string - - // Encoding type used to encode object keys in the response. 
- EncodingType string - - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - Marker string - MaxKeys int64 - Name string - - // When response is truncated (the IsTruncated element value in - // the response is true), you can use the key name in this field - // as marker in the subsequent request to get next set of objects. - // Object storage lists objects in alphabetical order Note: This - // element is returned only if you have delimiter request - // parameter specified. If response does not include the NextMaker - // and it is truncated, you can use the value of the last Key in - // the response as the marker in the subsequent request to get the - // next set of object keys. - NextMarker string - Prefix string -} - -// ListMultipartUploadsResult container for ListMultipartUploads response -type ListMultipartUploadsResult struct { - Bucket string - KeyMarker string - UploadIDMarker string `xml:"UploadIdMarker"` - NextKeyMarker string - NextUploadIDMarker string `xml:"NextUploadIdMarker"` - EncodingType string - MaxUploads int64 - IsTruncated bool - Uploads []ObjectMultipartInfo `xml:"Upload"` - Prefix string - Delimiter string - // A response can contain CommonPrefixes only if you specify a delimiter. - CommonPrefixes []CommonPrefix -} - -// initiator container for who initiated multipart upload. -type initiator struct { - ID string - DisplayName string -} - -// copyObjectResult container for copy object response. -type copyObjectResult struct { - ETag string - LastModified time.Time // time string format "2006-01-02T15:04:05.000Z" -} - -// ObjectPart container for particular part of an object. -type ObjectPart struct { - // Part number identifies the part. - PartNumber int - - // Date and time the part was uploaded. - LastModified time.Time - - // Entity tag returned when the part was uploaded, usually md5sum - // of the part. 
- ETag string - - // Size of the uploaded part data. - Size int64 -} - -// ListObjectPartsResult container for ListObjectParts response. -type ListObjectPartsResult struct { - Bucket string - Key string - UploadID string `xml:"UploadId"` - - Initiator initiator - Owner owner - - StorageClass string - PartNumberMarker int - NextPartNumberMarker int - MaxParts int - - // Indicates whether the returned list of parts is truncated. - IsTruncated bool - ObjectParts []ObjectPart `xml:"Part"` - - EncodingType string -} - -// initiateMultipartUploadResult container for InitiateMultiPartUpload -// response. -type initiateMultipartUploadResult struct { - Bucket string - Key string - UploadID string `xml:"UploadId"` -} - -// completeMultipartUploadResult container for completed multipart -// upload response. -type completeMultipartUploadResult struct { - Location string - Bucket string - Key string - ETag string -} - -// CompletePart sub container lists individual part numbers and their -// md5sum, part of completeMultipartUpload. -type CompletePart struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` - - // Part number identifies the part. - PartNumber int - ETag string -} - -// completeMultipartUpload container for completing multipart upload. -type completeMultipartUpload struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` - Parts []CompletePart `xml:"Part"` -} - -// createBucketConfiguration container for bucket configuration. 
-type createBucketConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` - Location string `xml:"LocationConstraint"` -} - -// deleteObject container for Delete element in MultiObjects Delete XML request -type deleteObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` -} - -// deletedObject container for Deleted element in MultiObjects Delete XML response -type deletedObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` - // These fields are ignored. - DeleteMarker bool - DeleteMarkerVersionID string -} - -// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response -type nonDeletedObject struct { - Key string - Code string - Message string - VersionID string `xml:"VersionId"` -} - -// deletedMultiObjects container for MultiObjects Delete XML request -type deleteMultiObjects struct { - XMLName xml.Name `xml:"Delete"` - Quiet bool - Objects []deleteObject `xml:"Object"` -} - -// deletedMultiObjectsResult container for MultiObjects Delete XML response -type deleteMultiObjectsResult struct { - XMLName xml.Name `xml:"DeleteResult"` - DeletedObjects []deletedObject `xml:"Deleted"` - UnDeletedObjects []nonDeletedObject `xml:"Error"` -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-select.go b/mantle/vendor/github.com/minio/minio-go/v7/api-select.go deleted file mode 100644 index e35cf02b..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-select.go +++ /dev/null @@ -1,751 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2018-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/xml" - "errors" - "fmt" - "hash" - "hash/crc32" - "io" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// CSVFileHeaderInfo - is the parameter for whether to utilize headers. -type CSVFileHeaderInfo string - -// Constants for file header info. -const ( - CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" - CSVFileHeaderInfoIgnore = "IGNORE" - CSVFileHeaderInfoUse = "USE" -) - -// SelectCompressionType - is the parameter for what type of compression is -// present -type SelectCompressionType string - -// Constants for compression types under select API. -const ( - SelectCompressionNONE SelectCompressionType = "NONE" - SelectCompressionGZIP = "GZIP" - SelectCompressionBZIP = "BZIP2" -) - -// CSVQuoteFields - is the parameter for how CSV fields are quoted. -type CSVQuoteFields string - -// Constants for csv quote styles. -const ( - CSVQuoteFieldsAlways CSVQuoteFields = "Always" - CSVQuoteFieldsAsNeeded = "AsNeeded" -) - -// QueryExpressionType - is of what syntax the expression is, this should only -// be SQL -type QueryExpressionType string - -// Constants for expression type. -const ( - QueryExpressionTypeSQL QueryExpressionType = "SQL" -) - -// JSONType determines json input serialization type. -type JSONType string - -// Constants for JSONTypes. 
-const ( - JSONDocumentType JSONType = "DOCUMENT" - JSONLinesType = "LINES" -) - -// ParquetInputOptions parquet input specific options -type ParquetInputOptions struct{} - -// CSVInputOptions csv input specific options -type CSVInputOptions struct { - FileHeaderInfo CSVFileHeaderInfo - fileHeaderInfoSet bool - - RecordDelimiter string - recordDelimiterSet bool - - FieldDelimiter string - fieldDelimiterSet bool - - QuoteCharacter string - quoteCharacterSet bool - - QuoteEscapeCharacter string - quoteEscapeCharacterSet bool - - Comments string - commentsSet bool -} - -// SetFileHeaderInfo sets the file header info in the CSV input options -func (c *CSVInputOptions) SetFileHeaderInfo(val CSVFileHeaderInfo) { - c.FileHeaderInfo = val - c.fileHeaderInfoSet = true -} - -// SetRecordDelimiter sets the record delimiter in the CSV input options -func (c *CSVInputOptions) SetRecordDelimiter(val string) { - c.RecordDelimiter = val - c.recordDelimiterSet = true -} - -// SetFieldDelimiter sets the field delimiter in the CSV input options -func (c *CSVInputOptions) SetFieldDelimiter(val string) { - c.FieldDelimiter = val - c.fieldDelimiterSet = true -} - -// SetQuoteCharacter sets the quote character in the CSV input options -func (c *CSVInputOptions) SetQuoteCharacter(val string) { - c.QuoteCharacter = val - c.quoteCharacterSet = true -} - -// SetQuoteEscapeCharacter sets the quote escape character in the CSV input options -func (c *CSVInputOptions) SetQuoteEscapeCharacter(val string) { - c.QuoteEscapeCharacter = val - c.quoteEscapeCharacterSet = true -} - -// SetComments sets the comments character in the CSV input options -func (c *CSVInputOptions) SetComments(val string) { - c.Comments = val - c.commentsSet = true -} - -// MarshalXML - produces the xml representation of the CSV input options struct -func (c CSVInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - if c.FileHeaderInfo != "" 
|| c.fileHeaderInfoSet { - if err := e.EncodeElement(c.FileHeaderInfo, xml.StartElement{Name: xml.Name{Local: "FileHeaderInfo"}}); err != nil { - return err - } - } - - if c.RecordDelimiter != "" || c.recordDelimiterSet { - if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { - return err - } - } - - if c.FieldDelimiter != "" || c.fieldDelimiterSet { - if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { - return err - } - } - - if c.QuoteCharacter != "" || c.quoteCharacterSet { - if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { - return err - } - } - - if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { - if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { - return err - } - } - - if c.Comments != "" || c.commentsSet { - if err := e.EncodeElement(c.Comments, xml.StartElement{Name: xml.Name{Local: "Comments"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// CSVOutputOptions csv output specific options -type CSVOutputOptions struct { - QuoteFields CSVQuoteFields - quoteFieldsSet bool - - RecordDelimiter string - recordDelimiterSet bool - - FieldDelimiter string - fieldDelimiterSet bool - - QuoteCharacter string - quoteCharacterSet bool - - QuoteEscapeCharacter string - quoteEscapeCharacterSet bool -} - -// SetQuoteFields sets the quote field parameter in the CSV output options -func (c *CSVOutputOptions) SetQuoteFields(val CSVQuoteFields) { - c.QuoteFields = val - c.quoteFieldsSet = true -} - -// SetRecordDelimiter sets the record delimiter character in the CSV output options -func (c *CSVOutputOptions) SetRecordDelimiter(val string) { - c.RecordDelimiter = val - c.recordDelimiterSet = true -} - -// SetFieldDelimiter sets the 
field delimiter character in the CSV output options -func (c *CSVOutputOptions) SetFieldDelimiter(val string) { - c.FieldDelimiter = val - c.fieldDelimiterSet = true -} - -// SetQuoteCharacter sets the quote character in the CSV output options -func (c *CSVOutputOptions) SetQuoteCharacter(val string) { - c.QuoteCharacter = val - c.quoteCharacterSet = true -} - -// SetQuoteEscapeCharacter sets the quote escape character in the CSV output options -func (c *CSVOutputOptions) SetQuoteEscapeCharacter(val string) { - c.QuoteEscapeCharacter = val - c.quoteEscapeCharacterSet = true -} - -// MarshalXML - produces the xml representation of the CSVOutputOptions struct -func (c CSVOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - if c.QuoteFields != "" || c.quoteFieldsSet { - if err := e.EncodeElement(c.QuoteFields, xml.StartElement{Name: xml.Name{Local: "QuoteFields"}}); err != nil { - return err - } - } - - if c.RecordDelimiter != "" || c.recordDelimiterSet { - if err := e.EncodeElement(c.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { - return err - } - } - - if c.FieldDelimiter != "" || c.fieldDelimiterSet { - if err := e.EncodeElement(c.FieldDelimiter, xml.StartElement{Name: xml.Name{Local: "FieldDelimiter"}}); err != nil { - return err - } - } - - if c.QuoteCharacter != "" || c.quoteCharacterSet { - if err := e.EncodeElement(c.QuoteCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteCharacter"}}); err != nil { - return err - } - } - - if c.QuoteEscapeCharacter != "" || c.quoteEscapeCharacterSet { - if err := e.EncodeElement(c.QuoteEscapeCharacter, xml.StartElement{Name: xml.Name{Local: "QuoteEscapeCharacter"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// JSONInputOptions json input specific options -type JSONInputOptions struct { - Type JSONType - typeSet bool -} - -// SetType 
sets the JSON type in the JSON input options -func (j *JSONInputOptions) SetType(typ JSONType) { - j.Type = typ - j.typeSet = true -} - -// MarshalXML - produces the xml representation of the JSONInputOptions struct -func (j JSONInputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - if j.Type != "" || j.typeSet { - if err := e.EncodeElement(j.Type, xml.StartElement{Name: xml.Name{Local: "Type"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// JSONOutputOptions - json output specific options -type JSONOutputOptions struct { - RecordDelimiter string - recordDelimiterSet bool -} - -// SetRecordDelimiter sets the record delimiter in the JSON output options -func (j *JSONOutputOptions) SetRecordDelimiter(val string) { - j.RecordDelimiter = val - j.recordDelimiterSet = true -} - -// MarshalXML - produces the xml representation of the JSONOutputOptions struct -func (j JSONOutputOptions) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - if j.RecordDelimiter != "" || j.recordDelimiterSet { - if err := e.EncodeElement(j.RecordDelimiter, xml.StartElement{Name: xml.Name{Local: "RecordDelimiter"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// SelectObjectInputSerialization - input serialization parameters -type SelectObjectInputSerialization struct { - CompressionType SelectCompressionType - Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` - CSV *CSVInputOptions `xml:"CSV,omitempty"` - JSON *JSONInputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOutputSerialization - output serialization parameters. 
-type SelectObjectOutputSerialization struct { - CSV *CSVOutputOptions `xml:"CSV,omitempty"` - JSON *JSONOutputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOptions - represents the input select body -type SelectObjectOptions struct { - XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` - ServerSideEncryption encrypt.ServerSide `xml:"-"` - Expression string - ExpressionType QueryExpressionType - InputSerialization SelectObjectInputSerialization - OutputSerialization SelectObjectOutputSerialization - RequestProgress struct { - Enabled bool - } -} - -// Header returns the http.Header representation of the SelectObject options. -func (o SelectObjectOptions) Header() http.Header { - headers := make(http.Header) - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - return headers -} - -// SelectObjectType - is the parameter which defines what type of object the -// operation is being performed on. -type SelectObjectType string - -// Constants for input data types. -const ( - SelectObjectTypeCSV SelectObjectType = "CSV" - SelectObjectTypeJSON = "JSON" - SelectObjectTypeParquet = "Parquet" -) - -// preludeInfo is used for keeping track of necessary information from the -// prelude. -type preludeInfo struct { - totalLen uint32 - headerLen uint32 -} - -// SelectResults is used for the streaming responses from the server. -type SelectResults struct { - pipeReader *io.PipeReader - resp *http.Response - stats *StatsMessage - progress *ProgressMessage -} - -// ProgressMessage is a struct for progress xml message. -type ProgressMessage struct { - XMLName xml.Name `xml:"Progress" json:"-"` - StatsMessage -} - -// StatsMessage is a struct for stat xml message. -type StatsMessage struct { - XMLName xml.Name `xml:"Stats" json:"-"` - BytesScanned int64 - BytesProcessed int64 - BytesReturned int64 -} - -// messageType represents the type of message. 
-type messageType string - -const ( - errorMsg messageType = "error" - commonMsg = "event" -) - -// eventType represents the type of event. -type eventType string - -// list of event-types returned by Select API. -const ( - endEvent eventType = "End" - recordsEvent = "Records" - progressEvent = "Progress" - statsEvent = "Stats" -) - -// contentType represents content type of event. -type contentType string - -const ( - xmlContent contentType = "text/xml" -) - -// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. -func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - selectReqBytes, err := xml.Marshal(opts) - if err != nil { - return nil, err - } - - urlValues := make(url.Values) - urlValues.Set("select", "") - urlValues.Set("select-type", "2") - - // Execute POST on bucket/object. - resp, err := c.executeMethod(ctx, http.MethodPost, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: opts.Header(), - contentMD5Base64: sumMD5Base64(selectReqBytes), - contentSHA256Hex: sum256Hex(selectReqBytes), - contentBody: bytes.NewReader(selectReqBytes), - contentLength: int64(len(selectReqBytes)), - }) - if err != nil { - return nil, err - } - - return NewSelectResults(resp, bucketName) -} - -// NewSelectResults creates a Select Result parser that parses the response -// and returns a Reader that will return parsed and assembled select output. 
-func NewSelectResults(resp *http.Response, bucketName string) (*SelectResults, error) { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - - pipeReader, pipeWriter := io.Pipe() - streamer := &SelectResults{ - resp: resp, - stats: &StatsMessage{}, - progress: &ProgressMessage{}, - pipeReader: pipeReader, - } - streamer.start(pipeWriter) - return streamer, nil -} - -// Close - closes the underlying response body and the stream reader. -func (s *SelectResults) Close() error { - defer closeResponse(s.resp) - return s.pipeReader.Close() -} - -// Read - is a reader compatible implementation for SelectObjectContent records. -func (s *SelectResults) Read(b []byte) (n int, err error) { - return s.pipeReader.Read(b) -} - -// Stats - information about a request's stats when processing is complete. -func (s *SelectResults) Stats() *StatsMessage { - return s.stats -} - -// Progress - information about the progress of a request. -func (s *SelectResults) Progress() *ProgressMessage { - return s.progress -} - -// start is the main function that decodes the large byte array into -// several events that are sent through the eventstream. -func (s *SelectResults) start(pipeWriter *io.PipeWriter) { - go func() { - for { - var prelude preludeInfo - var headers = make(http.Header) - var err error - - // Create CRC code - crc := crc32.New(crc32.IEEETable) - crcReader := io.TeeReader(s.resp.Body, crc) - - // Extract the prelude(12 bytes) into a struct to extract relevant information. 
- prelude, err = processPrelude(crcReader, crc) - if err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - // Extract the headers(variable bytes) into a struct to extract relevant information - if prelude.headerLen > 0 { - if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - } - - // Get the actual payload length so that the appropriate amount of - // bytes can be read or parsed. - payloadLen := prelude.PayloadLen() - - m := messageType(headers.Get("message-type")) - - switch m { - case errorMsg: - pipeWriter.CloseWithError(errors.New(headers.Get("error-code") + ":\"" + headers.Get("error-message") + "\"")) - closeResponse(s.resp) - return - case commonMsg: - // Get content-type of the payload. - c := contentType(headers.Get("content-type")) - - // Get event type of the payload. - e := eventType(headers.Get("event-type")) - - // Handle all supported events. 
- switch e { - case endEvent: - pipeWriter.Close() - closeResponse(s.resp) - return - case recordsEvent: - if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - case progressEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) - closeResponse(s.resp) - return - } - case statsEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) - closeResponse(s.resp) - return - } - } - } - - // Ensures that the full message's CRC is correct and - // that the message is not corrupted - if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - } - }() -} - -// PayloadLen is a function that calculates the length of the payload. 
-func (p preludeInfo) PayloadLen() int64 { - return int64(p.totalLen - p.headerLen - 16) -} - -// processPrelude is the function that reads the 12 bytes of the prelude and -// ensures the CRC is correct while also extracting relevant information into -// the struct, -func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { - var err error - var pInfo = preludeInfo{} - - // reads total length of the message (first 4 bytes) - pInfo.totalLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // reads total header length of the message (2nd 4 bytes) - pInfo.headerLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // checks that the CRC is correct (3rd 4 bytes) - preCRC := crc.Sum32() - if err := checkCRC(prelude, preCRC); err != nil { - return pInfo, err - } - - return pInfo, nil -} - -// extracts the relevant information from the Headers. -func extractHeader(body io.Reader, myHeaders http.Header) error { - for { - // extracts the first part of the header, - headerTypeName, err := extractHeaderType(body) - if err != nil { - // Since end of file, we have read all of our headers - if err == io.EOF { - break - } - return err - } - - // reads the 7 present in the header and ignores it. - extractUint8(body) - - headerValueName, err := extractHeaderValue(body) - if err != nil { - return err - } - - myHeaders.Set(headerTypeName, headerValueName) - - } - return nil -} - -// extractHeaderType extracts the first half of the header message, the header type. 
-func extractHeaderType(body io.Reader) (string, error) { - // extracts 2 bit integer - headerNameLen, err := extractUint8(body) - if err != nil { - return "", err - } - // extracts the string with the appropriate number of bytes - headerName, err := extractString(body, int(headerNameLen)) - if err != nil { - return "", err - } - return strings.TrimPrefix(headerName, ":"), nil -} - -// extractsHeaderValue extracts the second half of the header message, the -// header value -func extractHeaderValue(body io.Reader) (string, error) { - bodyLen, err := extractUint16(body) - if err != nil { - return "", err - } - bodyName, err := extractString(body, int(bodyLen)) - if err != nil { - return "", err - } - return bodyName, nil -} - -// extracts a string from byte array of a particular number of bytes. -func extractString(source io.Reader, lenBytes int) (string, error) { - myVal := make([]byte, lenBytes) - _, err := source.Read(myVal) - if err != nil { - return "", err - } - return string(myVal), nil -} - -// extractUint32 extracts a 4 byte integer from the byte array. -func extractUint32(r io.Reader) (uint32, error) { - buf := make([]byte, 4) - _, err := readFull(r, buf) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(buf), nil -} - -// extractUint16 extracts a 2 byte integer from the byte array. -func extractUint16(r io.Reader) (uint16, error) { - buf := make([]byte, 2) - _, err := readFull(r, buf) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint16(buf), nil -} - -// extractUint8 extracts a 1 byte integer from the byte array. -func extractUint8(r io.Reader) (uint8, error) { - buf := make([]byte, 1) - _, err := readFull(r, buf) - if err != nil { - return 0, err - } - return buf[0], nil -} - -// checkCRC ensures that the CRC matches with the one from the reader. 
-func checkCRC(r io.Reader, expect uint32) error { - msgCRC, err := extractUint32(r) - if err != nil { - return err - } - - if msgCRC != expect { - return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) - - } - return nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api-stat.go b/mantle/vendor/github.com/minio/minio-go/v7/api-stat.go deleted file mode 100644 index aa81cc43..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api-stat.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net/http" - "net/url" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// BucketExists verifies if bucket exists and you have permission to access it. Allows for a Context to -// control cancellations and timeouts. -func (c Client) BucketExists(ctx context.Context, bucketName string) (bool, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return false, err - } - - // Execute HEAD on bucketName. 
- resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ - bucketName: bucketName, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - if ToErrorResponse(err).Code == "NoSuchBucket" { - return false, nil - } - return false, err - } - if resp != nil { - resperr := httpRespToErrorResponse(resp, bucketName, "") - if ToErrorResponse(resperr).Code == "NoSuchBucket" { - return false, nil - } - if resp.StatusCode != http.StatusOK { - return false, httpRespToErrorResponse(resp, bucketName, "") - } - } - return true, nil -} - -// StatObject verifies if object exists and you have permission to access. -func (c Client) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - return c.statObject(ctx, bucketName, objectName, opts) -} - -// Lower level API for statObject supporting pre-conditions and range headers. -func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - headers := opts.Header() - if opts.Internal.ReplicationDeleteMarker { - headers.Set(minIOBucketReplicationDeleteMarker, "true") - } - - urlValues := make(url.Values) - if opts.VersionID != "" { - urlValues.Set("versionId", opts.VersionID) - } - // Execute HEAD on objectName. 
- resp, err := c.executeMethod(ctx, http.MethodHead, requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - deleteMarker := resp.Header.Get(amzDeleteMarker) == "true" - - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - if resp.StatusCode == http.StatusBadRequest && opts.VersionID != "" && deleteMarker { - errResp := ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "MethodNotAllowed", - Message: "The specified method is not allowed against this resource.", - BucketName: bucketName, - Key: objectName, - } - return ObjectInfo{ - VersionID: resp.Header.Get(amzVersionID), - IsDeleteMarker: deleteMarker, - }, errResp - } - return ObjectInfo{ - VersionID: resp.Header.Get(amzVersionID), - IsDeleteMarker: deleteMarker, - }, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - return ToObjectInfo(bucketName, objectName, resp.Header) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/api.go b/mantle/vendor/github.com/minio/minio-go/v7/api.go deleted file mode 100644 index 3ca67f37..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/api.go +++ /dev/null @@ -1,896 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/http/cookiejar" - "net/http/httputil" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" - - md5simd "github.com/minio/md5-simd" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/signer" - "golang.org/x/net/publicsuffix" -) - -// Client implements Amazon S3 compatible methods. -type Client struct { - /// Standard options. - - // Parsed endpoint url provided by the user. - endpointURL *url.URL - - // Holds various credential providers. - credsProvider *credentials.Credentials - - // Custom signerType value overrides all credentials. - overrideSignerType credentials.SignatureType - - // User supplied. - appInfo struct { - appName string - appVersion string - } - - // Indicate whether we are using https or not - secure bool - - // Needs allocation. - httpClient *http.Client - bucketLocCache *bucketLocationCache - - // Advanced functionality. - isTraceEnabled bool - traceErrorsOnly bool - traceOutput io.Writer - - // S3 specific accelerated endpoint. - s3AccelerateEndpoint string - - // Region endpoint - region string - - // Random seed. - random *rand.Rand - - // lookup indicates type of url lookup supported by server. If not specified, - // default to Auto. - lookup BucketLookupType - - // Factory for MD5 hash functions. - md5Hasher func() md5simd.Hasher - sha256Hasher func() md5simd.Hasher -} - -// Options for New method -type Options struct { - Creds *credentials.Credentials - Secure bool - Transport http.RoundTripper - Region string - BucketLookup BucketLookupType - - // Custom hash routines. Leave nil to use standard. - CustomMD5 func() md5simd.Hasher - CustomSHA256 func() md5simd.Hasher -} - -// Global constants. 
-const ( - libraryName = "minio-go" - libraryVersion = "v7.0.12" -) - -// User Agent should always following the below style. -// Please open an issue to discuss any new changes here. -// -// MinIO (OS; ARCH) LIB/VER APP/VER -const ( - libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " - libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion -) - -// BucketLookupType is type of url lookup supported by server. -type BucketLookupType int - -// Different types of url lookup supported by the server.Initialized to BucketLookupAuto -const ( - BucketLookupAuto BucketLookupType = iota - BucketLookupDNS - BucketLookupPath -) - -// New - instantiate minio client with options -func New(endpoint string, opts *Options) (*Client, error) { - if opts == nil { - return nil, errors.New("no options provided") - } - clnt, err := privateNew(endpoint, opts) - if err != nil { - return nil, err - } - // Google cloud storage should be set to signature V2, force it if not. - if s3utils.IsGoogleEndpoint(*clnt.endpointURL) { - clnt.overrideSignerType = credentials.SignatureV2 - } - // If Amazon S3 set to signature v4. - if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { - clnt.overrideSignerType = credentials.SignatureV4 - } - - return clnt, nil -} - -// EndpointURL returns the URL of the S3 endpoint. -func (c *Client) EndpointURL() *url.URL { - endpoint := *c.endpointURL // copy to prevent callers from modifying internal state - return &endpoint -} - -// lockedRandSource provides protected rand source, implements rand.Source interface. -type lockedRandSource struct { - lk sync.Mutex - src rand.Source -} - -// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. -func (r *lockedRandSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -// Seed uses the provided seed value to initialize the generator to a -// deterministic state. 
-func (r *lockedRandSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// Redirect requests by re signing the request. -func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { - if len(via) >= 5 { - return errors.New("stopped after 5 redirects") - } - if len(via) == 0 { - return nil - } - lastRequest := via[len(via)-1] - var reAuth bool - for attr, val := range lastRequest.Header { - // if hosts do not match do not copy Authorization header - if attr == "Authorization" && req.Host != lastRequest.Host { - reAuth = true - continue - } - if _, ok := req.Header[attr]; !ok { - req.Header[attr] = val - } - } - - *c.endpointURL = *req.URL - - value, err := c.credsProvider.Get() - if err != nil { - return err - } - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - region = c.region - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if reAuth { - // Check if there is no region override, if not get it from the URL if possible. - if region == "" { - region = s3utils.GetRegionFromURL(*c.endpointURL) - } - switch { - case signerType.IsV2(): - return errors.New("signature V2 cannot support redirection") - case signerType.IsV4(): - signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) - } - } - return nil -} - -func privateNew(endpoint string, opts *Options) (*Client, error) { - // construct endpoint. 
- endpointURL, err := getEndpointURL(endpoint, opts.Secure) - if err != nil { - return nil, err - } - - // Initialize cookies to preserve server sent cookies if any and replay - // them upon each request. - jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) - if err != nil { - return nil, err - } - - // instantiate new Client. - clnt := new(Client) - - // Save the credentials. - clnt.credsProvider = opts.Creds - - // Remember whether we are using https or not - clnt.secure = opts.Secure - - // Save endpoint URL, user agent for future uses. - clnt.endpointURL = endpointURL - - transport := opts.Transport - if transport == nil { - transport, err = DefaultTransport(opts.Secure) - if err != nil { - return nil, err - } - } - - // Instantiate http client and bucket location cache. - clnt.httpClient = &http.Client{ - Jar: jar, - Transport: transport, - CheckRedirect: clnt.redirectHeaders, - } - - // Sets custom region, if region is empty bucket location cache is used automatically. - if opts.Region == "" { - opts.Region = s3utils.GetRegionFromURL(*clnt.endpointURL) - } - clnt.region = opts.Region - - // Instantiate bucket location cache. - clnt.bucketLocCache = newBucketLocationCache() - - // Introduce a new locked random seed. - clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) - - // Add default md5 hasher. - clnt.md5Hasher = opts.CustomMD5 - clnt.sha256Hasher = opts.CustomSHA256 - if clnt.md5Hasher == nil { - clnt.md5Hasher = newMd5Hasher - } - if clnt.sha256Hasher == nil { - clnt.sha256Hasher = newSHA256Hasher - } - // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined - // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. - clnt.lookup = opts.BucketLookup - // Return. - return clnt, nil -} - -// SetAppInfo - add application details to user agent. 
-func (c *Client) SetAppInfo(appName string, appVersion string) { - // if app name and version not set, we do not set a new user agent. - if appName != "" && appVersion != "" { - c.appInfo.appName = appName - c.appInfo.appVersion = appVersion - } -} - -// TraceOn - enable HTTP tracing. -func (c *Client) TraceOn(outputStream io.Writer) { - // if outputStream is nil then default to os.Stdout. - if outputStream == nil { - outputStream = os.Stdout - } - // Sets a new output stream. - c.traceOutput = outputStream - - // Enable tracing. - c.isTraceEnabled = true -} - -// TraceErrorsOnlyOn - same as TraceOn, but only errors will be traced. -func (c *Client) TraceErrorsOnlyOn(outputStream io.Writer) { - c.TraceOn(outputStream) - c.traceErrorsOnly = true -} - -// TraceErrorsOnlyOff - Turns off the errors only tracing and everything will be traced after this call. -// If all tracing needs to be turned off, call TraceOff(). -func (c *Client) TraceErrorsOnlyOff() { - c.traceErrorsOnly = false -} - -// TraceOff - disable HTTP tracing. -func (c *Client) TraceOff() { - // Disable tracing. - c.isTraceEnabled = false - c.traceErrorsOnly = false -} - -// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your -// requests. This feature is only specific to S3 for all other endpoints this -// function does nothing. To read further details on s3 transfer acceleration -// please vist - -// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - c.s3AccelerateEndpoint = accelerateEndpoint - } -} - -// Hash materials provides relevant initialized hash algo writers -// based on the expected signature type. -// -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. 
-func (c *Client) hashMaterials(isMd5Requested bool) (hashAlgos map[string]md5simd.Hasher, hashSums map[string][]byte) { - hashSums = make(map[string][]byte) - hashAlgos = make(map[string]md5simd.Hasher) - if c.overrideSignerType.IsV4() { - if c.secure { - hashAlgos["md5"] = c.md5Hasher() - } else { - hashAlgos["sha256"] = c.sha256Hasher() - } - } else { - if c.overrideSignerType.IsAnonymous() { - hashAlgos["md5"] = c.md5Hasher() - } - } - if isMd5Requested { - hashAlgos["md5"] = c.md5Hasher() - } - return hashAlgos, hashSums -} - -// requestMetadata - is container for all the values to make a request. -type requestMetadata struct { - // If set newRequest presigns the URL. - presignURL bool - - // User supplied. - bucketName string - objectName string - queryValues url.Values - customHeader http.Header - expires int64 - - // Generated by our internal code. - bucketLocation string - contentBody io.Reader - contentLength int64 - contentMD5Base64 string // carries base64 encoded md5sum - contentSHA256Hex string // carries hex encoded sha256sum -} - -// dumpHTTP - dump HTTP request and response. -func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { - // Starts http dump. - _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") - if err != nil { - return err - } - - // Filter out Signature field from Authorization header. - origAuth := req.Header.Get("Authorization") - if origAuth != "" { - req.Header.Set("Authorization", redactSignature(origAuth)) - } - - // Only display request header. - reqTrace, err := httputil.DumpRequestOut(req, false) - if err != nil { - return err - } - - // Write request to trace output. - _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) - if err != nil { - return err - } - - // Only display response header. - var respTrace []byte - - // For errors we make sure to dump response body as well. 
- if resp.StatusCode != http.StatusOK && - resp.StatusCode != http.StatusPartialContent && - resp.StatusCode != http.StatusNoContent { - respTrace, err = httputil.DumpResponse(resp, true) - if err != nil { - return err - } - } else { - respTrace, err = httputil.DumpResponse(resp, false) - if err != nil { - return err - } - } - - // Write response to trace output. - _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) - if err != nil { - return err - } - - // Ends the http dump. - _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") - if err != nil { - return err - } - - // Returns success. - return nil -} - -// do - execute http request. -func (c Client) do(req *http.Request) (*http.Response, error) { - resp, err := c.httpClient.Do(req) - if err != nil { - // Handle this specifically for now until future Golang versions fix this issue properly. - if urlErr, ok := err.(*url.Error); ok { - if strings.Contains(urlErr.Err.Error(), "EOF") { - return nil, &url.Error{ - Op: urlErr.Op, - URL: urlErr.URL, - Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), - } - } - } - return nil, err - } - - // Response cannot be non-nil, report error if thats the case. - if resp == nil { - msg := "Response is empty. " + reportIssue - return nil, errInvalidArgument(msg) - } - - // If trace is enabled, dump http request and response, - // except when the traceErrorsOnly enabled and the response's status code is ok - if c.isTraceEnabled && !(c.traceErrorsOnly && resp.StatusCode == http.StatusOK) { - err = c.dumpHTTP(req, resp) - if err != nil { - return nil, err - } - } - - return resp, nil -} - -// List of success status. -var successStatus = []int{ - http.StatusOK, - http.StatusNoContent, - http.StatusPartialContent, -} - -// executeMethod - instantiates a given method, and retries the -// request upon any error up to maxRetries attempts in a binomially -// delayed manner using a standard back off algorithm. 
-func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { - var retryable bool // Indicates if request can be retried. - var bodySeeker io.Seeker // Extracted seeker from io.Reader. - var reqRetry = MaxRetry // Indicates how many times we can retry the request - - if metadata.contentBody != nil { - // Check if body is seekable then it is retryable. - bodySeeker, retryable = metadata.contentBody.(io.Seeker) - switch bodySeeker { - case os.Stdin, os.Stdout, os.Stderr: - retryable = false - } - // Retry only when reader is seekable - if !retryable { - reqRetry = 1 - } - - // Figure out if the body can be closed - if yes - // we will definitely close it upon the function - // return. - bodyCloser, ok := metadata.contentBody.(io.Closer) - if ok { - defer bodyCloser.Close() - } - } - - // Create cancel context to control 'newRetryTimer' go routine. - retryCtx, cancel := context.WithCancel(ctx) - - // Indicate to our routine to exit cleanly upon return. - defer cancel() - - for range c.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { - // Retry executes the following function body if request has an - // error until maxRetries have been exhausted, retry attempts are - // performed after waiting for a given period of time in a - // binomial fashion. - if retryable { - // Seek back to beginning for each attempt. - if _, err = bodySeeker.Seek(0, 0); err != nil { - // If seek failed, no need to retry. - return nil, err - } - } - - // Instantiate a new request. - var req *http.Request - req, err = c.newRequest(ctx, method, metadata) - if err != nil { - errResponse := ToErrorResponse(err) - if isS3CodeRetryable(errResponse.Code) { - continue // Retry. - } - return nil, err - } - - // Initiate the request. 
- res, err = c.do(req) - if err != nil { - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return nil, err - } - - // Retry the request - continue - } - - // For any known successful http status, return quickly. - for _, httpStatus := range successStatus { - if httpStatus == res.StatusCode { - return res, nil - } - } - - // Read the body to be saved later. - errBodyBytes, err := ioutil.ReadAll(res.Body) - // res.Body should be closed - closeResponse(res) - if err != nil { - return nil, err - } - - // Save the body. - errBodySeeker := bytes.NewReader(errBodyBytes) - res.Body = ioutil.NopCloser(errBodySeeker) - - // For errors verify if its retryable otherwise fail quickly. - errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) - - // Save the body back again. - errBodySeeker.Seek(0, 0) // Seek back to starting point. - res.Body = ioutil.NopCloser(errBodySeeker) - - // Bucket region if set in error response and the error - // code dictates invalid region, we can retry the request - // with the new region. - // - // Additionally we should only retry if bucketLocation and custom - // region is empty. - if c.region == "" { - switch errResponse.Code { - case "AuthorizationHeaderMalformed": - fallthrough - case "InvalidRegion": - fallthrough - case "AccessDenied": - if errResponse.Region == "" { - // Region is empty we simply return the error. - return res, err - } - // Region is not empty figure out a way to - // handle this appropriately. - if metadata.bucketName != "" { - // Gather Cached location only if bucketName is present. - if location, cachedOk := c.bucketLocCache.Get(metadata.bucketName); cachedOk && location != errResponse.Region { - c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) - continue // Retry. - } - } else { - // This is for ListBuckets() fallback. 
- if errResponse.Region != metadata.bucketLocation { - // Retry if the error response has a different region - // than the request we just made. - metadata.bucketLocation = errResponse.Region - continue // Retry - } - } - } - } - - // Verify if error response code is retryable. - if isS3CodeRetryable(errResponse.Code) { - continue // Retry. - } - - // Verify if http status code is retryable. - if isHTTPStatusRetryable(res.StatusCode) { - continue // Retry. - } - - // For all other cases break out of the retry loop. - break - } - - // Return an error when retry is canceled or deadlined - if e := retryCtx.Err(); e != nil { - return nil, e - } - - return res, err -} - -// newRequest - instantiate a new HTTP request for a given method. -func (c Client) newRequest(ctx context.Context, method string, metadata requestMetadata) (req *http.Request, err error) { - // If no method is supplied default to 'POST'. - if method == "" { - method = http.MethodPost - } - - location := metadata.bucketLocation - if location == "" { - if metadata.bucketName != "" { - // Gather location only if bucketName is present. - location, err = c.getBucketLocation(ctx, metadata.bucketName) - if err != nil { - return nil, err - } - } - if location == "" { - location = getDefaultLocation(*c.endpointURL, c.region) - } - } - - // Look if target url supports virtual host. - // We explicitly disallow MakeBucket calls to not use virtual DNS style, - // since the resolution may fail. - isMakeBucket := (metadata.objectName == "" && method == http.MethodPut && len(metadata.queryValues) == 0) - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) && !isMakeBucket - - // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, - isVirtualHost, metadata.queryValues) - if err != nil { - return nil, err - } - - // Initialize a new HTTP request for the method. 
- req, err = http.NewRequestWithContext(ctx, method, targetURL.String(), nil) - if err != nil { - return nil, err - } - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - // Generate presign url if needed, return right here. - if metadata.expires != 0 && metadata.presignURL { - if signerType.IsAnonymous() { - return nil, errInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") - } - if signerType.IsV2() { - // Presign URL with signature v2. - req = signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) - } else if signerType.IsV4() { - // Presign URL with signature v4. - req = signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) - } - return req, nil - } - - // Set 'User-Agent' header for the request. - c.setUserAgent(req) - - // Set all headers. - for k, v := range metadata.customHeader { - req.Header.Set(k, v[0]) - } - - // Go net/http notoriously closes the request body. - // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. - // This can cause underlying *os.File seekers to fail, avoid that - // by making sure to wrap the closer as a nop. - if metadata.contentLength == 0 { - req.Body = nil - } else { - req.Body = ioutil.NopCloser(metadata.contentBody) - } - - // Set incoming content-length. 
- req.ContentLength = metadata.contentLength - if req.ContentLength <= -1 { - // For unknown content length, we upload using transfer-encoding: chunked. - req.TransferEncoding = []string{"chunked"} - } - - // set md5Sum for content protection. - if len(metadata.contentMD5Base64) > 0 { - req.Header.Set("Content-Md5", metadata.contentMD5Base64) - } - - // For anonymous requests just return. - if signerType.IsAnonymous() { - return req, nil - } - - switch { - case signerType.IsV2(): - // Add signature version '2' authorization header. - req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - case metadata.objectName != "" && metadata.queryValues == nil && method == http.MethodPut && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: - // Streaming signature is used by default for a PUT object request. Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. - req = signer.StreamingSignV4(req, accessKeyID, - secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) - default: - // Set sha256 sum for signature calculation only with signature version '4'. - shaHeader := unsignedPayload - if metadata.contentSHA256Hex != "" { - shaHeader = metadata.contentSHA256Hex - } - req.Header.Set("X-Amz-Content-Sha256", shaHeader) - - // Add signature version '4' authorization header. - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) - } - - // Return request. - return req, nil -} - -// set User agent. -func (c Client) setUserAgent(req *http.Request) { - req.Header.Set("User-Agent", libraryUserAgent) - if c.appInfo.appName != "" && c.appInfo.appVersion != "" { - req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) - } -} - -// makeTargetURL make a new target url. 
-func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { - host := c.endpointURL.Host - // For Amazon S3 endpoint, try to fetch location based endpoint. - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - if c.s3AccelerateEndpoint != "" && bucketName != "" { - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - // Disable transfer acceleration for non-compliant bucket names. - if strings.Contains(bucketName, ".") { - return nil, errTransferAccelerationBucket(bucketName) - } - // If transfer acceleration is requested set new host. - // For more details about enabling transfer acceleration read here. - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - host = c.s3AccelerateEndpoint - } else { - // Do not change the host if the endpoint URL is a FIPS S3 endpoint. - if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { - // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) - } - } - } - - // Save scheme. - scheme := c.endpointURL.Scheme - - // Strip port 80 and 443 so we won't send these ports in Host header. - // The reason is that browsers and curl automatically remove :80 and :443 - // with the generated presigned urls, then a signature mismatch error. - if h, p, err := net.SplitHostPort(host); err == nil { - if scheme == "http" && p == "80" || scheme == "https" && p == "443" { - host = h - } - } - - urlStr := scheme + "://" + host + "/" - // Make URL only if bucketName is available, otherwise use the - // endpoint URL. - if bucketName != "" { - // If endpoint supports virtual host style use that always. - // Currently only S3 and Google Cloud Storage would support - // virtual host style. - if isVirtualHostStyle { - urlStr = scheme + "://" + bucketName + "." 
+ host + "/" - if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) - } - } else { - // If not fall back to using path style. - urlStr = urlStr + bucketName + "/" - if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) - } - } - } - - // If there are any query values, add them to the end. - if len(queryValues) > 0 { - urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) - } - - return url.Parse(urlStr) -} - -// returns true if virtual hosted style requests are to be used. -func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { - if bucketName == "" { - return false - } - - if c.lookup == BucketLookupDNS { - return true - } - if c.lookup == BucketLookupPath { - return false - } - - // default to virtual only for Amazon/Google storage. In all other cases use - // path style requests - return s3utils.IsVirtualHostSupported(url, bucketName) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/bucket-cache.go b/mantle/vendor/github.com/minio/minio-go/v7/bucket-cache.go deleted file mode 100644 index 156150f6..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/bucket-cache.go +++ /dev/null @@ -1,253 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "context" - "net" - "net/http" - "net/url" - "path" - "sync" - - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/signer" -) - -// bucketLocationCache - Provides simple mechanism to hold bucket -// locations in memory. -type bucketLocationCache struct { - // mutex is used for handling the concurrent - // read/write requests for cache. - sync.RWMutex - - // items holds the cached bucket locations. - items map[string]string -} - -// newBucketLocationCache - Provides a new bucket location cache to be -// used internally with the client object. -func newBucketLocationCache() *bucketLocationCache { - return &bucketLocationCache{ - items: make(map[string]string), - } -} - -// Get - Returns a value of a given key if it exists. -func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { - r.RLock() - defer r.RUnlock() - location, ok = r.items[bucketName] - return -} - -// Set - Will persist a value into cache. -func (r *bucketLocationCache) Set(bucketName string, location string) { - r.Lock() - defer r.Unlock() - r.items[bucketName] = location -} - -// Delete - Deletes a bucket name from cache. -func (r *bucketLocationCache) Delete(bucketName string) { - r.Lock() - defer r.Unlock() - delete(r.items, bucketName) -} - -// GetBucketLocation - get location for the bucket name from location cache, if not -// fetch freshly by making a new request. -func (c Client) GetBucketLocation(ctx context.Context, bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - return c.getBucketLocation(ctx, bucketName) -} - -// getBucketLocation - Get location for the bucketName from location map cache, if not -// fetch freshly by making a new request. 
-func (c Client) getBucketLocation(ctx context.Context, bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - - // Region set then no need to fetch bucket location. - if c.region != "" { - return c.region, nil - } - - if location, ok := c.bucketLocCache.Get(bucketName); ok { - return location, nil - } - - // Initialize a new request. - req, err := c.getBucketLocationRequest(ctx, bucketName) - if err != nil { - return "", err - } - - // Initiate the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return "", err - } - location, err := processBucketLocationResponse(resp, bucketName) - if err != nil { - return "", err - } - c.bucketLocCache.Set(bucketName, location) - return location, nil -} - -// processes the getBucketLocation http response from the server. -func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { - if resp != nil { - if resp.StatusCode != http.StatusOK { - err = httpRespToErrorResponse(resp, bucketName, "") - errResp := ToErrorResponse(err) - // For access denied error, it could be an anonymous - // request. Move forward and let the top level callers - // succeed if possible based on their policy. - switch errResp.Code { - case "NotImplemented": - if errResp.Server == "AmazonSnowball" { - return "snowball", nil - } - case "AuthorizationHeaderMalformed": - fallthrough - case "InvalidRegion": - fallthrough - case "AccessDenied": - if errResp.Region == "" { - return "us-east-1", nil - } - return errResp.Region, nil - } - return "", err - } - } - - // Extract location. - var locationConstraint string - err = xmlDecoder(resp.Body, &locationConstraint) - if err != nil { - return "", err - } - - location := locationConstraint - // Location is empty will be 'us-east-1'. - if location == "" { - location = "us-east-1" - } - - // Location can be 'EU' convert it to meaningful 'eu-west-1'. 
- if location == "EU" { - location = "eu-west-1" - } - - // Save the location into cache. - - // Return. - return location, nil -} - -// getBucketLocationRequest - Wrapper creates a new getBucketLocation request. -func (c Client) getBucketLocationRequest(ctx context.Context, bucketName string) (*http.Request, error) { - // Set location query. - urlValues := make(url.Values) - urlValues.Set("location", "") - - // Set get bucket location always as path style. - targetURL := *c.endpointURL - - // as it works in makeTargetURL method from api.go file - if h, p, err := net.SplitHostPort(targetURL.Host); err == nil { - if targetURL.Scheme == "http" && p == "80" || targetURL.Scheme == "https" && p == "443" { - targetURL.Host = h - } - } - - isVirtualHost := s3utils.IsVirtualHostSupported(targetURL, bucketName) - - var urlStr string - - //only support Aliyun OSS for virtual hosted path, compatible Amazon & Google Endpoint - if isVirtualHost && s3utils.IsAliyunOSSEndpoint(targetURL) { - urlStr = c.endpointURL.Scheme + "://" + bucketName + "." + targetURL.Host + "/?location" - } else { - targetURL.Path = path.Join(bucketName, "") + "/" - targetURL.RawQuery = urlValues.Encode() - urlStr = targetURL.String() - } - - // Get a new HTTP request for the method. - req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil) - if err != nil { - return nil, err - } - - // Set UserAgent for the request. - c.setUserAgent(req) - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. 
- if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if signerType.IsAnonymous() { - return req, nil - } - - if signerType.IsV2() { - // Get Bucket Location calls should be always path style - isVirtualHost := false - req = signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - return req, nil - } - - // Set sha256 sum for signature calculation only with signature version '4'. - contentSha256 := emptySHA256Hex - if c.secure { - contentSha256 = unsignedPayload - } - - req.Header.Set("X-Amz-Content-Sha256", contentSha256) - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") - return req, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/code_of_conduct.md b/mantle/vendor/github.com/minio/minio-go/v7/code_of_conduct.md deleted file mode 100644 index cb232c3c..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/code_of_conduct.md +++ /dev/null @@ -1,80 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior, in compliance with the -licensing terms applying to the Project developments. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. However, these actions shall respect the -licensing terms of the Project Developments that will always supersede such -Code of Conduct. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. 
Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at dev@min.io. The project team -will review and investigate all complaints, and will respond in a way that it deems -appropriate to the circumstances. The project team is obligated to maintain -confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -This version includes a clarification to ensure that the code of conduct is in -compliance with the free software licensing terms of the project. - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/mantle/vendor/github.com/minio/minio-go/v7/constants.go b/mantle/vendor/github.com/minio/minio-go/v7/constants.go deleted file mode 100644 index 2a2e6a0d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/constants.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -/// Multipart upload defaults. - -// absMinPartSize - absolute minimum part size (5 MiB) below which -// a part in a multipart upload may not be uploaded. -const absMinPartSize = 1024 * 1024 * 5 - -// minPartSize - minimum part size 16MiB per object after which -// putObject behaves internally as multipart. -const minPartSize = 1024 * 1024 * 16 - -// maxPartsCount - maximum number of parts for a single multipart session. -const maxPartsCount = 10000 - -// maxPartSize - maximum part size 5GiB for a single multipart upload -// operation. -const maxPartSize = 1024 * 1024 * 1024 * 5 - -// maxSinglePutObjectSize - maximum size 5GiB of object per PUT -// operation. -const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 - -// maxMultipartPutObjectSize - maximum size 5TiB of object for -// Multipart operation. -const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 - -// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when -// we don't want to sign the request payload -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// Total number of parallel workers used for multipart operation. -const totalWorkers = 4 - -// Signature related constants. -const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" -) - -const ( - // Storage class header. 
- amzStorageClass = "X-Amz-Storage-Class" - - // Website redirect location header - amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" - - // Object Tagging headers - amzTaggingHeader = "X-Amz-Tagging" - amzTaggingHeaderDirective = "X-Amz-Tagging-Directive" - - amzVersionID = "X-Amz-Version-Id" - amzTaggingCount = "X-Amz-Tagging-Count" - amzExpiration = "X-Amz-Expiration" - amzReplicationStatus = "X-Amz-Replication-Status" - amzDeleteMarker = "X-Amz-Delete-Marker" - - // Object legal hold header - amzLegalHoldHeader = "X-Amz-Object-Lock-Legal-Hold" - - // Object retention header - amzLockMode = "X-Amz-Object-Lock-Mode" - amzLockRetainUntil = "X-Amz-Object-Lock-Retain-Until-Date" - amzBypassGovernance = "X-Amz-Bypass-Governance-Retention" - - // Replication status - amzBucketReplicationStatus = "X-Amz-Replication-Status" - // Minio specific Replication/lifecycle transition extension - minIOBucketSourceMTime = "X-Minio-Source-Mtime" - - minIOBucketSourceETag = "X-Minio-Source-Etag" - minIOBucketReplicationDeleteMarker = "X-Minio-Source-DeleteMarker" - minIOBucketReplicationProxyRequest = "X-Minio-Source-Proxy-Request" - minIOBucketReplicationRequest = "X-Minio-Source-Replication-Request" -) diff --git a/mantle/vendor/github.com/minio/minio-go/v7/core.go b/mantle/vendor/github.com/minio/minio-go/v7/core.go deleted file mode 100644 index 26931f22..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/core.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "net/http" - - "github.com/minio/minio-go/v7/pkg/encrypt" -) - -// Core - Inherits Client and adds new methods to expose the low level S3 APIs. -type Core struct { - *Client -} - -// NewCore - Returns new initialized a Core client, this CoreClient should be -// only used under special conditions such as need to access lower primitives -// and being able to use them to write your own wrappers. -func NewCore(endpoint string, opts *Options) (*Core, error) { - var s3Client Core - client, err := New(endpoint, opts) - if err != nil { - return nil, err - } - s3Client.Client = client - return &s3Client, nil -} - -// ListObjects - List all the objects at a prefix, optionally with marker and delimiter -// you can further filter the results. -func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { - return c.listObjectsQuery(context.Background(), bucket, prefix, marker, delimiter, maxKeys, nil) -} - -// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses -// continuationToken instead of marker to support iteration over the results. 
-func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { - return c.listObjectsV2Query(context.Background(), bucketName, objectPrefix, continuationToken, fetchOwner, false, delimiter, maxkeys, nil) -} - -// CopyObject - copies an object from source object to destination object on server side. -func (c Core) CopyObject(ctx context.Context, sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string, srcOpts CopySrcOptions, dstOpts PutObjectOptions) (ObjectInfo, error) { - return c.copyObjectDo(ctx, sourceBucket, sourceObject, destBucket, destObject, metadata, srcOpts, dstOpts) -} - -// CopyObjectPart - creates a part in a multipart upload by copying (a -// part of) an existing object. -func (c Core) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { - - return c.copyObjectPartDo(ctx, srcBucket, srcObject, destBucket, destObject, uploadID, - partID, startOffset, length, metadata) -} - -// PutObject - Upload object. Uploads using single PUT call. -func (c Core) PutObject(ctx context.Context, bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, opts PutObjectOptions) (UploadInfo, error) { - hookReader := newHook(data, opts.Progress) - return c.putObjectDo(ctx, bucket, object, hookReader, md5Base64, sha256Hex, size, opts) -} - -// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. -func (c Core) NewMultipartUpload(ctx context.Context, bucket, object string, opts PutObjectOptions) (uploadID string, err error) { - result, err := c.initiateMultipartUpload(ctx, bucket, object, opts) - return result.UploadID, err -} - -// ListMultipartUploads - List incomplete uploads. 
-func (c Core) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { - return c.listMultipartUploadsQuery(ctx, bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) -} - -// PutObjectPart - Upload an object part. -func (c Core) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { - return c.uploadPart(ctx, bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse) -} - -// ListObjectParts - List uploaded parts of an incomplete upload.x -func (c Core) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) { - return c.listObjectPartsQuery(ctx, bucket, object, uploadID, partNumberMarker, maxParts) -} - -// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. -func (c Core) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []CompletePart, opts PutObjectOptions) (string, error) { - res, err := c.completeMultipartUpload(ctx, bucket, object, uploadID, completeMultipartUpload{ - Parts: parts, - }, opts) - return res.ETag, err -} - -// AbortMultipartUpload - Abort an incomplete upload. -func (c Core) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { - return c.abortMultipartUpload(ctx, bucket, object, uploadID) -} - -// GetBucketPolicy - fetches bucket access policy for a given bucket. -func (c Core) GetBucketPolicy(ctx context.Context, bucket string) (string, error) { - return c.getBucketPolicy(ctx, bucket) -} - -// PutBucketPolicy - applies a new bucket access policy for a given bucket. 
-func (c Core) PutBucketPolicy(ctx context.Context, bucket, bucketPolicy string) error { - return c.putBucketPolicy(ctx, bucket, bucketPolicy) -} - -// GetObject is a lower level API implemented to support reading -// partial objects and also downloading objects with special conditions -// matching etag, modtime etc. -func (c Core) GetObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, http.Header, error) { - return c.getObject(ctx, bucketName, objectName, opts) -} - -// StatObject is a lower level API implemented to support special -// conditions matching etag, modtime on a request. -func (c Core) StatObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - return c.statObject(ctx, bucketName, objectName, opts) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/functional_tests.go b/mantle/vendor/github.com/minio/minio-go/v7/functional_tests.go deleted file mode 100644 index 6b329dd2..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ /dev/null @@ -1,11812 +0,0 @@ -// +build mint - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "bytes" - "context" - "errors" - "fmt" - "hash/crc32" - "io" - "io/ioutil" - "math/rand" - "mime/multipart" - "net/http" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/dustin/go-humanize" - jsoniter "github.com/json-iterator/go" - log "github.com/sirupsen/logrus" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/notification" - "github.com/minio/minio-go/v7/pkg/tags" -) - -const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569" -const ( - letterIdxBits = 6 // 6 bits to represent a letter index - letterIdxMask = 1<= len(buf) { - err = nil - } else if n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -func cleanEmptyEntries(fields log.Fields) log.Fields { - cleanFields := log.Fields{} - for k, v := range fields { - if v != "" { - cleanFields[k] = v - } - } - return cleanFields -} - -// log successful test runs -func successLogger(testName string, function string, args map[string]interface{}, startTime time.Time) *log.Entry { - // calculate the test case duration - duration := time.Since(startTime) - // log with the fields as per mint - fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"} - return log.WithFields(cleanEmptyEntries(fields)) -} - -// As few of the features are not available in Gateway(s) currently, Check if err value is NotImplemented, -// and log as NA in that case and continue execution. 
Otherwise log as failure and return -func logError(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) { - // If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests - // Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in - // addition to NotImplemented error returned from server - if isErrNotImplemented(err) { - ignoredLog(testName, function, args, startTime, message).Info() - } else { - failureLog(testName, function, args, startTime, alert, message, err).Fatal() - } -} - -// log failed test runs -func failureLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry { - // calculate the test case duration - duration := time.Since(startTime) - var fields log.Fields - // log with the fields as per mint - if err != nil { - fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err} - } else { - fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message} - } - return log.WithFields(cleanEmptyEntries(fields)) -} - -// log not applicable test runs -func ignoredLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry { - // calculate the test case duration - duration := time.Since(startTime) - // log with the fields as per mint - fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, - "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": strings.Split(alert, " ")[0] + " is NotImplemented"} - return 
log.WithFields(cleanEmptyEntries(fields)) -} - -// Delete objects in given bucket, recursively -func cleanupBucket(bucketName string, c *minio.Client) error { - // Create a done channel to control 'ListObjectsV2' go routine. - doneCh := make(chan struct{}) - // Exit cleanly upon return. - defer close(doneCh) - // Iterate over all objects in the bucket via listObjectsV2 and delete - for objCh := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Recursive: true}) { - if objCh.Err != nil { - return objCh.Err - } - if objCh.Key != "" { - err := c.RemoveObject(context.Background(), bucketName, objCh.Key, minio.RemoveObjectOptions{}) - if err != nil { - return err - } - } - } - for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { - if objPartInfo.Err != nil { - return objPartInfo.Err - } - if objPartInfo.Key != "" { - err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) - if err != nil { - return err - } - } - } - // objects are already deleted, clear the buckets now - err := c.RemoveBucket(context.Background(), bucketName) - if err != nil { - return err - } - return err -} - -func cleanupVersionedBucket(bucketName string, c *minio.Client) error { - doneCh := make(chan struct{}) - defer close(doneCh) - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { - if obj.Err != nil { - return obj.Err - } - if obj.Key != "" { - err := c.RemoveObject(context.Background(), bucketName, obj.Key, - minio.RemoveObjectOptions{VersionID: obj.VersionID, GovernanceBypass: true}) - if err != nil { - return err - } - } - } - for objPartInfo := range c.ListIncompleteUploads(context.Background(), bucketName, "", true) { - if objPartInfo.Err != nil { - return objPartInfo.Err - } - if objPartInfo.Key != "" { - err := c.RemoveIncompleteUpload(context.Background(), bucketName, objPartInfo.Key) - if err != nil { - return 
err - } - } - } - // objects are already deleted, clear the buckets now - err := c.RemoveBucket(context.Background(), bucketName) - if err != nil { - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) { - log.Println("found", obj.Key, obj.VersionID) - } - return err - } - return err -} - -func isErrNotImplemented(err error) bool { - return minio.ToErrorResponse(err).Code == "NotImplemented" -} - -func init() { - // If server endpoint is not set, all tests default to - // using https://play.min.io - if os.Getenv(serverEndpoint) == "" { - os.Setenv(serverEndpoint, "play.min.io") - os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F") - os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG") - os.Setenv(enableHTTPS, "1") - } -} - -var mintDataDir = os.Getenv("MINT_DATA_DIR") - -func getMintDataDirFilePath(filename string) (fp string) { - if mintDataDir == "" { - return - } - return filepath.Join(mintDataDir, filename) -} - -func newRandomReader(seed, size int64) io.Reader { - return io.LimitReader(rand.New(rand.NewSource(seed)), size) -} - -func mustCrcReader(r io.Reader) uint32 { - crc := crc32.NewIEEE() - _, err := io.Copy(crc, r) - if err != nil { - panic(err) - } - return crc.Sum32() -} - -func crcMatches(r io.Reader, want uint32) error { - crc := crc32.NewIEEE() - _, err := io.Copy(crc, r) - if err != nil { - panic(err) - } - got := crc.Sum32() - if got != want { - return fmt.Errorf("crc mismatch, want %x, got %x", want, got) - } - return nil -} - -func crcMatchesName(r io.Reader, name string) error { - want := dataFileCRC32[name] - crc := crc32.NewIEEE() - _, err := io.Copy(crc, r) - if err != nil { - panic(err) - } - got := crc.Sum32() - if got != want { - return fmt.Errorf("crc mismatch, want %x, got %x", want, got) - } - return nil -} - -// read data from file if it exists or optionally create a buffer of particular size -func getDataReader(fileName string) io.ReadCloser { - if 
mintDataDir == "" { - size := int64(dataFileMap[fileName]) - if _, ok := dataFileCRC32[fileName]; !ok { - dataFileCRC32[fileName] = mustCrcReader(newRandomReader(size, size)) - } - return ioutil.NopCloser(newRandomReader(size, size)) - } - reader, _ := os.Open(getMintDataDirFilePath(fileName)) - if _, ok := dataFileCRC32[fileName]; !ok { - dataFileCRC32[fileName] = mustCrcReader(reader) - reader.Close() - reader, _ = os.Open(getMintDataDirFilePath(fileName)) - } - return reader -} - -// randString generates random names and prepends them with a known prefix. -func randString(n int, src rand.Source, prefix string) string { - b := make([]byte, n) - // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! - for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { - if remain == 0 { - cache, remain = src.Int63(), letterIdxMax - } - if idx := int(cache & letterIdxMask); idx < len(letterBytes) { - b[i] = letterBytes[idx] - i-- - } - cache >>= letterIdxBits - remain-- - } - return prefix + string(b[0:30-len(prefix)]) -} - -var dataFileMap = map[string]int{ - "datafile-0-b": 0, - "datafile-1-b": 1, - "datafile-1-kB": 1 * humanize.KiByte, - "datafile-10-kB": 10 * humanize.KiByte, - "datafile-33-kB": 33 * humanize.KiByte, - "datafile-100-kB": 100 * humanize.KiByte, - "datafile-1.03-MB": 1056 * humanize.KiByte, - "datafile-1-MB": 1 * humanize.MiByte, - "datafile-5-MB": 5 * humanize.MiByte, - "datafile-6-MB": 6 * humanize.MiByte, - "datafile-11-MB": 11 * humanize.MiByte, - "datafile-65-MB": 65 * humanize.MiByte, - "datafile-129-MB": 129 * humanize.MiByte, -} - -var dataFileCRC32 = map[string]uint32{} - -func isFullMode() bool { - return os.Getenv("MINT_MODE") == "full" -} - -func getFuncName() string { - return getFuncNameLoc(2) -} - -func getFuncNameLoc(caller int) string { - pc, _, _, _ := runtime.Caller(caller) - return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.") -} - -// Tests bucket re-create errors. 
-func testMakeBucketError() { - region := "eu-central-1" - - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket Failed", err) - return - } - defer cleanupBucket(bucketName, c) - - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { - logError(testName, function, args, startTime, "", "Bucket already exists", err) - return - } - // Verify valid error response from server. 
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testMetadataSizeLimit() { - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, objectSize, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts.UserMetadata": "", - } - rand.Seed(startTime.Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - const HeaderSizeLimit = 8 * 1024 - const UserMetadataLimit = 2 * 1024 - - // Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail - metadata := make(map[string]string) - metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, 
minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil) - return - } - - // Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail - metadata = make(map[string]string) - metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test"))) - args["metadata"] = fmt.Sprint(metadata) - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata}) - if err == nil { - logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. -func testMakeBucketRegions() { - region := "eu-central-1" - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - // initialize logging params - args := map[string]interface{}{ - "bucketName": "", - "region": region, - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. 
- if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - // Make a new bucket with '.' in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - region = "us-west-2" - args["region"] = region - if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectReadAt() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "objectContentType", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object content type - objectContentType := "binary/octet-stream" - args["objectContentType"] = objectContentType - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Get Object failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat Object failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) - return - } - if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content types don't match", err) - return - } - if err := crcMatchesName(r, "datafile-129-MB"); err != nil { - logError(testName, function, args, startTime, "", "data CRC check 
failed", err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testListObjectVersions() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ListObjectVersions(bucketName, prefix, recursive)" - args := map[string]interface{}{ - "bucketName": "", - "prefix": "", - "recursive": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-10-kB"] - var reader = getDataReader("datafile-10-kB") - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - bufSize = dataFileMap["datafile-1-b"] - reader = getDataReader("datafile-1-b") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Unexpected object deletion", err) - return - } - - var deleteMarkers, versions int - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - if info.Key != objectName { - logError(testName, function, args, startTime, "", "Unexpected object name in listing objects", nil) - return - } - if info.VersionID == "" { - logError(testName, function, args, startTime, "", "Unexpected 
version id in listing objects", nil) - return - } - if info.IsDeleteMarker { - deleteMarkers++ - if !info.IsLatest { - logError(testName, function, args, startTime, "", "Unexpected IsLatest field in listing objects", nil) - return - } - } else { - versions++ - } - } - - if deleteMarkers != 1 { - logError(testName, function, args, startTime, "", "Unexpected number of DeleteMarker elements in listing objects", nil) - return - } - - if versions != 2 { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testStatObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "StatObject" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-10-kB"] - var reader = getDataReader("datafile-10-kB") - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - bufSize = dataFileMap["datafile-1-b"] - reader = getDataReader("datafile-1-b") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - reader.Close() - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - - var results []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - results = append(results, info) - } - - if len(results) != 2 { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - for i := 0; i < len(results); i++ { - opts := minio.StatObjectOptions{VersionID: results[i].VersionID} - statInfo, err := c.StatObject(context.Background(), bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", 
"error during HEAD object", err) - return - } - if statInfo.VersionID == "" || statInfo.VersionID != results[i].VersionID { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected version id", err) - return - } - if statInfo.ETag != results[i].ETag { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) - return - } - if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) - return - } - if statInfo.Size != results[i].Size { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) - return - } - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testGetObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Save the contents of datafiles to check with GetObject() reader output later - var buffers [][]byte - var testFiles = []string{"datafile-1-b", "datafile-10-kB"} - - for _, testFile := range testFiles { - r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - r.Close() - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - buffers = append(buffers, buf) - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - - var results []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - results = append(results, info) - } - - if len(results) != 2 { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - sort.SliceStable(results, func(i, j int) bool { - return results[i].Size < results[j].Size - }) - - 
sort.SliceStable(buffers, func(i, j int) bool { - return len(buffers[i]) < len(buffers[j]) - }) - - for i := 0; i < len(results); i++ { - opts := minio.GetObjectOptions{VersionID: results[i].VersionID} - reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", "error during GET object", err) - return - } - statInfo, err := reader.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) - return - } - if statInfo.ETag != results[i].ETag { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected ETag", err) - return - } - if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) - return - } - if statInfo.Size != results[i].Size { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) - return - } - - tmpBuffer := bytes.NewBuffer([]byte{}) - _, err = io.Copy(tmpBuffer, reader) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) - return - } - - if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { - logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) - return - } - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testPutObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject()" - args := map[string]interface{}{} - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - const n = 10 - // Read input... - - // Save the data concurrently. 
- var wg sync.WaitGroup - wg.Add(n) - var buffers = make([][]byte, n) - var errs [n]error - for i := 0; i < n; i++ { - r := newRandomReader(int64((1<<20)*i+i), int64(i)) - buf, err := ioutil.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - buffers[i] = buf - - go func(i int) { - defer wg.Done() - _, errs[i] = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{PartSize: 5 << 20}) - }(i) - } - wg.Wait() - for _, err := range errs { - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - var results []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - results = append(results, info) - } - - if len(results) != n { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - sort.Slice(results, func(i, j int) bool { - return results[i].Size < results[j].Size - }) - - sort.Slice(buffers, func(i, j int) bool { - return len(buffers[i]) < len(buffers[j]) - }) - - for i := 0; i < len(results); i++ { - opts := minio.GetObjectOptions{VersionID: results[i].VersionID} - reader, err := c.GetObject(context.Background(), bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", "error during GET object", err) - return - } - statInfo, err := reader.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "error during calling reader.Stat()", err) - return - } - if statInfo.ETag != results[i].ETag { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected 
ETag", err) - return - } - if statInfo.LastModified.Unix() != results[i].LastModified.Unix() { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Last-Modified", err) - return - } - if statInfo.Size != results[i].Size { - logError(testName, function, args, startTime, "", "error during HEAD object, unexpected Content-Length", err) - return - } - - tmpBuffer := bytes.NewBuffer([]byte{}) - _, err = io.Copy(tmpBuffer, reader) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected io.Copy()", err) - return - } - - if !bytes.Equal(tmpBuffer.Bytes(), buffers[i]) { - logError(testName, function, args, startTime, "", "unexpected content of GetObject()", err) - return - } - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testCopyObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - var testFiles = []string{"datafile-1-b", "datafile-10-kB"} - for _, testFile := range testFiles { - r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - r.Close() - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - var infos []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - infos = append(infos, info) - } - - sort.Slice(infos, func(i, j int) bool { - return infos[i].Size < infos[j].Size - }) - - reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) - return - } - - oldestContent, err := ioutil.ReadAll(reader) - if err != nil { - 
logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) - return - } - - // Copy Source - srcOpts := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - VersionID: infos[0].VersionID, - } - args["src"] = srcOpts - - dstOpts := minio.CopyDestOptions{ - Bucket: bucketName, - Object: objectName + "-copy", - } - args["dst"] = dstOpts - - // Perform the Copy - if _, err = c.CopyObject(context.Background(), dstOpts, srcOpts); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer readerCopy.Close() - - newestContent, err := ioutil.ReadAll(readerCopy) - if err != nil { - logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) - return - } - - if len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { - logError(testName, function, args, startTime, "", "Unexpected destination object content", err) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testConcurrentCopyObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - var testFiles = []string{"datafile-10-kB"} - for _, testFile := range testFiles { - r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - r.Close() - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - var infos []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during 
listing objects", err) - return - } - infos = append(infos, info) - } - - sort.Slice(infos, func(i, j int) bool { - return infos[i].Size < infos[j].Size - }) - - reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{VersionID: infos[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject of the oldest version content failed", err) - return - } - - oldestContent, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "Reading the oldest object version failed", err) - return - } - - // Copy Source - srcOpts := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - VersionID: infos[0].VersionID, - } - args["src"] = srcOpts - - dstOpts := minio.CopyDestOptions{ - Bucket: bucketName, - Object: objectName + "-copy", - } - args["dst"] = dstOpts - - // Perform the Copy concurrently - const n = 10 - var wg sync.WaitGroup - wg.Add(n) - var errs [n]error - for i := 0; i < n; i++ { - go func(i int) { - defer wg.Done() - _, errs[i] = c.CopyObject(context.Background(), dstOpts, srcOpts) - }(i) - } - wg.Wait() - for _, err := range errs { - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - } - - objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: false, Prefix: dstOpts.Object}) - infos = []minio.ObjectInfo{} - for info := range objectsInfo { - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{VersionID: info.VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer readerCopy.Close() - - newestContent, err := ioutil.ReadAll(readerCopy) - if err != nil { - logError(testName, function, args, startTime, "", "Reading from GetObject reader failed", err) - return - } - - if 
len(newestContent) == 0 || !bytes.Equal(oldestContent, newestContent) { - logError(testName, function, args, startTime, "", "Unexpected destination object content", err) - return - } - infos = append(infos, info) - } - - if len(infos) != n { - logError(testName, function, args, startTime, "", "Unexpected number of Version elements in listing objects", nil) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testComposeObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} - var testFiles = []string{"datafile-5-MB", "datafile-10-kB"} - var testFilesBytes [][]byte - - for _, testFile := range testFiles { - r := getDataReader(testFile) - buf, err := ioutil.ReadAll(r) - if err != nil { - logError(testName, function, args, startTime, "", "unexpected failure", err) - return - } - r.Close() - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - testFilesBytes = append(testFilesBytes, buf) - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - - var results []minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - results = append(results, info) - } - - sort.SliceStable(results, func(i, j int) bool { - return results[i].Size > results[j].Size - }) - - // Source objects to concatenate. 
We also specify decryption - // key for each - src1 := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - VersionID: results[0].VersionID, - } - - src2 := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - VersionID: results[1].VersionID, - } - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: objectName + "-copy", - } - - _, err = c.ComposeObject(context.Background(), dst, src1, src2) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName, objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject of the copy object failed", err) - return - } - defer readerCopy.Close() - - copyContentBytes, err := ioutil.ReadAll(readerCopy) - if err != nil { - logError(testName, function, args, startTime, "", "Reading from the copy object reader failed", err) - return - } - - var expectedContent []byte - for _, fileBytes := range testFilesBytes { - expectedContent = append(expectedContent, fileBytes...) - } - - if len(copyContentBytes) == 0 || !bytes.Equal(copyContentBytes, expectedContent) { - logError(testName, function, args, startTime, "", "Unexpected destination object content", err) - return - } - - // Delete all objects and their versions as long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testRemoveObjectWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "DeleteObject()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - objectsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - var version minio.ObjectInfo - for info := range objectsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - version = info - break - } - - err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{VersionID: version.VersionID}) - if err != nil { - logError(testName, function, args, 
startTime, "", "DeleteObject failed", err) - return - } - - objectsInfo = c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - for range objectsInfo { - logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) - return - } - - err = c.RemoveBucket(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testRemoveObjectsWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "DeleteObjects()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader("datafile-10-kB"), int64(dataFileMap["datafile-10-kB"]), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - objectsVersions := make(chan minio.ObjectInfo) - go func() { - objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, - minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - for info := range objectsVersionsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - objectsVersions <- info - } - close(objectsVersions) - }() - - removeErrors := c.RemoveObjects(context.Background(), bucketName, objectsVersions, minio.RemoveObjectsOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "DeleteObjects call failed", err) - return - } - - for e := range removeErrors { - if e.Err != nil { - logError(testName, function, args, startTime, "", "Single delete operation failed", err) - return - } - } - - objectsVersionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - for range objectsVersionsInfo { - logError(testName, function, args, startTime, "", "Unexpected versioning info, should not have any one ", err) - return - } - - err = c.RemoveBucket(context.Background(), bucketName) - if 
err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testObjectTaggingWithVersioning() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "{Get,Set,Remove}ObjectTagging()" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - err = c.EnableVersioning(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "Enable versioning failed", err) - return - } - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - for _, file := range []string{"datafile-1-b", "datafile-10-kB"} { - _, err = c.PutObject(context.Background(), bucketName, objectName, getDataReader(file), int64(dataFileMap[file]), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - } - - versionsInfo := c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{WithVersions: true, Recursive: true}) - - var versions []minio.ObjectInfo - for info := range versionsInfo { - if info.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error during listing objects", err) - return - } - versions = append(versions, info) - } - - sort.SliceStable(versions, func(i, j int) bool { - return versions[i].Size < versions[j].Size - }) - - tagsV1 := map[string]string{"key1": "val1"} - t1, err := tags.MapToObjectTags(tagsV1) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) - return - } - - err = c.PutObjectTagging(context.Background(), bucketName, objectName, t1, minio.PutObjectTaggingOptions{VersionID: versions[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) - return - } - - tagsV2 := map[string]string{"key2": "val2"} - t2, err := tags.MapToObjectTags(tagsV2) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectTagging (1) failed", err) - 
return - } - - err = c.PutObjectTagging(context.Background(), bucketName, objectName, t2, minio.PutObjectTaggingOptions{VersionID: versions[1].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) - return - } - - tagsEqual := func(tags1, tags2 map[string]string) bool { - for k1, v1 := range tags1 { - v2, found := tags2[k1] - if found { - if v1 != v2 { - return false - } - } - } - return true - } - - gotTagsV1, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) - return - } - - if !tagsEqual(t1.ToMap(), gotTagsV1.ToMap()) { - logError(testName, function, args, startTime, "", "Unexpected tags content (1)", err) - return - } - - gotTagsV2, err := c.GetObjectTagging(context.Background(), bucketName, objectName, minio.GetObjectTaggingOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectTaggingContext failed", err) - return - } - - if !tagsEqual(t2.ToMap(), gotTagsV2.ToMap()) { - logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) - return - } - - err = c.RemoveObjectTagging(context.Background(), bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versions[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectTagging (2) failed", err) - return - } - - emptyTags, err := c.GetObjectTagging(context.Background(), bucketName, objectName, - minio.GetObjectTaggingOptions{VersionID: versions[0].VersionID}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObjectTagging failed", err) - return - } - - if len(emptyTags.ToMap()) != 0 { - logError(testName, function, args, startTime, "", "Unexpected tags content (2)", err) - return - } - - // Delete all objects and their versions as 
long as the bucket itself - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test PutObject using a large data to trigger multipart readat -func testPutObjectWithMetadata() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", - } - - if !isFullMode() { - ignoredLog(testName, function, args, startTime, "Skipping functional tests for short/quick runs").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "Make bucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Object custom metadata - customContentType := "custom/contenttype" - - args["metadata"] = map[string][]string{ - "Content-Type": {customContentType}, - "X-Amz-Meta-CustomKey": {"extra spaces in value"}, - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ - ContentType: customContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) - return - } - if err := crcMatchesName(r, "datafile-129-MB"); err != nil { - logError(testName, function, args, startTime, "", "data CRC check failed", err) - return - } - if err := r.Close(); err != nil { - 
logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testPutObjectWithContentLanguage() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - data := []byte{} - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ - ContentLanguage: "en", - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if objInfo.Metadata.Get("Content-Language") != "en" { - logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with streaming signature. -func testPutObjectStreaming() { - // initialize logging params - objectName := "test-object" - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": objectName, - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload an object. - sizes := []int64{0, 64*1024 - 1, 64 * 1024} - - for _, size := range sizes { - data := newRandomReader(size, size) - ui, err := c.PutObject(context.Background(), bucketName, objectName, data, int64(size), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } - - if ui.Size != size { - logError(testName, function, args, startTime, "", "PutObjectStreaming result has unexpected size", nil) - return - } - - objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if objInfo.Size != size { - logError(testName, function, args, startTime, "", "Unexpected size", err) - return - } - - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object seeker from the end, using whence set to '2'. -func testGetObjectSeekEnd() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - 
logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - - pos, err := r.Seek(-100, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Object Seek failed", err) - return - } - if pos != st.Size-100 { - logError(testName, function, args, startTime, "", "Incorrect position", err) - return - } - buf2 := make([]byte, 100) - m, err := readFull(r, buf2) - if err != nil { - logError(testName, function, args, startTime, "", "Error reading through readFull", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err) - return - } - hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:]) - hexBuf2 := fmt.Sprintf("%02x", buf2[:m]) - if hexBuf1 != hexBuf2 { - logError(testName, function, args, startTime, "", "Values at same index dont match", err) - return - } - pos, err = r.Seek(-100, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Object Seek failed", err) - return - } - if pos != st.Size-100 { - logError(testName, function, args, startTime, "", "Incorrect position", err) - return - } - if err = r.Close(); err != nil { - logError(testName, function, args, startTime, "", "ObjectClose failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object reader to not throw error on being closed twice. -func testGetObjectClosedTwice() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return 
- } - if err := crcMatchesName(r, "datafile-33-kB"); err != nil { - logError(testName, function, args, startTime, "", "data CRC check failed", err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Object Close failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Already closed object. No error returned", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test RemoveObjects request where context cancels after timeout -func testRemoveObjectsContext() { - // Initialize logging params. - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjects(ctx, bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current tie. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate put data. - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 20 objects. 
- nrObjects := 20 - objectsCh := make(chan minio.ObjectInfo) - go func() { - defer close(objectsCh) - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, - minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- minio.ObjectInfo{ - Key: info.Key, - VersionID: info.VersionID, - } - } - }() - // Set context to cancel in 1 nanosecond. - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Call RemoveObjects API with short timeout. - errorCh := c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) - // Check for error. - select { - case r := <-errorCh: - if r.Err == nil { - logError(testName, function, args, startTime, "", "RemoveObjects should fail on short timeout", err) - return - } - } - // Set context with longer timeout. - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - defer cancel() - // Perform RemoveObjects with the longer timeout. Expect the removals to succeed. - errorCh = c.RemoveObjects(ctx, bucketName, objectsCh, minio.RemoveObjectsOptions{}) - select { - case r, more := <-errorCh: - if more || r.Err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test removing multiple objects with Remove API -func testRemoveMultipleObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjects(bucketName, objectsCh)" - args := map[string]interface{}{ - "bucketName": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Enable tracing, write to stdout. - // c.TraceOn(os.Stderr) - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - r := bytes.NewReader(bytes.Repeat([]byte("a"), 8)) - - // Multi remove of 1100 objects - nrObjects := 200 - - objectsCh := make(chan minio.ObjectInfo) - - go func() { - defer close(objectsCh) - // Upload objects and send them to objectsCh - for i := 0; i < nrObjects; i++ { - objectName := "sample" + strconv.Itoa(i) + ".txt" - info, err := c.PutObject(context.Background(), bucketName, objectName, r, 8, - minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - continue - } - objectsCh <- minio.ObjectInfo{ - Key: info.Key, - VersionID: info.VersionID, - } - } - }() - - // Call RemoveObjects API - errorCh := c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) - - // Check if errorCh doesn't receive any error - select { - case r, more := <-errorCh: - if more { - logError(testName, function, args, startTime, "", "Unexpected error", r.Err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests 
FPutObject of a big file to trigger multipart -func testFPutObjectMultipart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload. - var fileName = getMintDataDirFilePath("datafile-129-MB") - if fileName == "" { - // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
- if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - fileName = file.Name() - args["fileName"] = fileName - } - totalSize := dataFileMap["datafile-129-MB"] - // Set base object name - objectName := bucketName + "FPutObject" + "-standard" - args["objectName"] = objectName - - objectContentType := "testapplication/octet-stream" - args["objectContentType"] = objectContentType - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Unexpected error", err) - return - } - if objInfo.Size != int64(totalSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) - return - } - if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType doesn't match", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject with null contentType (default = application/octet-stream) -func testFPutObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := 
"FPutObject(bucketName, objectName, fileName, opts)" - - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - location := "us-east-1" - - // Make a new bucket. - args["bucketName"] = bucketName - args["location"] = location - function = "MakeBucket(bucketName, location)" - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-129-MB") - if fName == "" { - // Make a temp file with minPartSize bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload. 
- if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - // Close the file pro-actively for windows. - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - - // Set base object name - function = "FPutObject(bucketName, objectName, fileName, opts)" - objectName := bucketName + "FPutObject" - args["objectName"] = objectName + "-standard" - args["fileName"] = fName - args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"} - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - ui, err := c.FPutObject(context.Background(), bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - if ui.Size != int64(dataFileMap["datafile-129-MB"]) { - logError(testName, function, args, startTime, "", "FPutObject returned an unexpected upload size", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - args["objectName"] = objectName + "-Octet" - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - - srcFile, err := os.Open(fName) - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - defer srcFile.Close() - // Add extension to temp file name - tmpFile, err := os.Create(fName + ".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "File create failed", err) - return - } - _, err = io.Copy(tmpFile, srcFile) - if err != nil 
{ - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - tmpFile.Close() - - // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - args["objectName"] = objectName + "-GTar" - args["opts"] = minio.PutObjectOptions{} - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Check headers - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-standard" - rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rStandard.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err) - return - } - - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-Octet" - rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rOctet.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rOctet.ContentType, err) - return - } - - function = "StatObject(bucketName, objectName, opts)" - args["objectName"] = objectName + "-GTar" - rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.ContentType != "application/x-gtar" && 
rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err) - return - } - - os.Remove(fName + ".gtar") - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject request when context cancels after timeout -func testFPutObjectContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 1 parts worth of data to use multipart upload. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-1-MB") - if fName == "" { - // Make a temp file with 1 MiB bytes of data. 
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - // Upload 1 parts to trigger multipart upload - if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - // Close the file pro-actively for windows. - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - - // Set base object name - objectName := bucketName + "FPutObjectContext" - args["objectName"] = objectName - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Perform FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - // Perform FPutObject with a long timeout. 
Expect the put object to succeed - _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on long timeout", err) - return - } - - _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Tests FPutObject request when context cancels after timeout -func testFPutObjectContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObjectContext(ctx, bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "opts": "minio.PutObjectOptions{ContentType:objectContentType}", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload 1 parts worth of data to use multipart upload. - // Use different data in part for multipart tests to check parts are uploaded in correct order. - var fName = getMintDataDirFilePath("datafile-1-MB") - if fName == "" { - // Make a temp file with 1 MiB bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectContextTest") - if err != nil { - logError(testName, function, args, startTime, "", "Temp file creation failed", err) - return - } - - // Upload 1 parts to trigger multipart upload - if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - // Close the file pro-actively for windows. - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - defer os.Remove(file.Name()) - fName = file.Name() - } - - // Set base object name - objectName := bucketName + "FPutObjectContext" - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - // Perform FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "FPutObject should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - // Perform FPutObject with a long timeout. 
Expect the put object to succeed - _, err = c.FPutObject(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject shouldn't fail on longer timeout", err) - return - } - - _, err = c.StatObject(context.Background(), bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test validates putObject with context to see if request cancellation is honored. -func testPutObjectContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(ctx, bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "opts": "", - } - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket call failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - cancel() - args["ctx"] = ctx - args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"} - - _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObject should fail on short timeout", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - - defer cancel() - reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Tests get object ReaderSeeker interface methods. -func testGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - }() - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - // Generic seek error for errors other than io.EOF - seekErr := errors.New("seek error") - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - 
shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, seekErr, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, seekErr, false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - // We expect an error - if testCase.err == seekErr && err == nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) - return - } - // We expect a specific error - if testCase.err != seekErr && testCase.err != err { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err) - return - } - // If we expect an error go to the next loop - if testCase.err != nil { - continue - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err) - return - } - // Compare only if shouldCmp is activated - if 
testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderAt interface methods. -func testGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. 
- m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", 
"ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, len(buf)) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, len(buf)+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Reproduces issue https://github.com/minio/minio-go/issues/1137 -func testGetObjectReadAtWhenEOFWasReached() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read directly - buf1 := make([]byte, len(buf)) - buf2 := make([]byte, 512) - - m, err := r.Read(buf1) - if err != nil { - if err != io.EOF { - 
logError(testName, function, args, startTime, "", "Read failed", err) - return - } - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "Read read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf) { - logError(testName, function, args, startTime, "", "Incorrect count of Read data", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, 512) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[512:1024]) { - logError(testName, function, args, startTime, "", "Incorrect count of ReadAt data", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test Presigned Post Policy -func testPresignedPostPolicy() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PresignedPostPolicy(policy)" - args := map[string]interface{}{ - "policy": "", - } - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. 
- var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - // Azure requires the key to not start with a number - metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") - metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - policy := minio.NewPostPolicy() - - if err := policy.SetBucket(""); err == nil { - logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err) - return - } - if err := policy.SetKey(""); err == nil { - logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err) - return - } - if err := policy.SetKeyStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err) - return - } - if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil { - logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err) - return - } - if err := policy.SetContentType(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err) - return - } - if err := policy.SetContentTypeStartsWith(""); err == nil { - logError(testName, function, args, startTime, "", "SetContentTypeStartsWith did not fail for invalid conditions", err) - return - } - if err := policy.SetContentLengthRange(1024*1024, 1024); err == 
nil { - logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err) - return - } - if err := policy.SetUserMetadata("", ""); err == nil { - logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err) - return - } - - policy.SetBucket(bucketName) - policy.SetKey(objectName) - policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days - policy.SetContentType("binary/octet-stream") - policy.SetContentLengthRange(10, 1024*1024) - policy.SetUserMetadata(metadataKey, metadataValue) - args["policy"] = policy.String() - - presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(context.Background(), policy) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err) - return - } - - var formBuf bytes.Buffer - writer := multipart.NewWriter(&formBuf) - for k, v := range formData { - writer.WriteField(k, v) - } - - // Get a 33KB file to upload and test if set post policy works - var filePath = getMintDataDirFilePath("datafile-33-kB") - if filePath == "" { - // Make a temp file with 33 KB data. 
- file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if err = file.Close(); err != nil { - logError(testName, function, args, startTime, "", "File Close failed", err) - return - } - filePath = file.Name() - } - - // add file to post request - f, err := os.Open(filePath) - defer f.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - w, err := writer.CreateFormFile("file", filePath) - if err != nil { - logError(testName, function, args, startTime, "", "CreateFormFile failed", err) - return - } - - _, err = io.Copy(w, f) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - writer.Close() - - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively canceled after 30secs - // with no response. 
- Timeout: 30 * time.Second, - Transport: transport, - } - - req, err := http.NewRequest(http.MethodPost, presignedPostPolicyURL.String(), bytes.NewReader(formBuf.Bytes())) - if err != nil { - logError(testName, function, args, startTime, "", "Http request failed", err) - return - } - - req.Header.Set("Content-Type", writer.FormDataContentType()) - - // make post request with correct form data - res, err := httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "Http request failed", err) - return - } - defer res.Body.Close() - if res.StatusCode != http.StatusNoContent { - logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status)) - return - } - - // expected path should be absolute path of the object - var scheme string - if mustParseBool(os.Getenv(enableHTTPS)) { - scheme = "https://" - } else { - scheme = "http://" - } - - expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName - expectedLocationBucketDNS := scheme + bucketName + "." + os.Getenv(serverEndpoint) + "/" + objectName - - if val, ok := res.Header["Location"]; ok { - if val[0] != expectedLocation && val[0] != expectedLocationBucketDNS { - logError(testName, function, args, startTime, "", "Location in header response is incorrect", err) - return - } - } else { - logError(testName, function, args, startTime, "", "Location not found in header response", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(dst, src)" - args := map[string]interface{}{} - - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName+"-copy", c) - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - // Copy Source - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - // Set copy conditions. - MatchETag: objInfo.ETag, - MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - } - args["src"] = src - - dst := minio.CopyDestOptions{ - Bucket: bucketName + "-copy", - Object: objectName + "-copy", - } - - // Perform the Copy - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - // Check the various fields of source object against destination object. 
- objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err) - return - } - - if err := crcMatchesName(r, "datafile-33-kB"); err != nil { - logError(testName, function, args, startTime, "", "data CRC check failed", err) - return - } - if err := crcMatchesName(readerCopy, "datafile-33-kB"); err != nil { - logError(testName, function, args, startTime, "", "copy data CRC check failed", err) - return - } - // Close all the get readers before proceeding with CopyObject operations. - r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - NoMatchETag: objInfo.ETag, - } - - // Perform the Copy which should fail - _, err = c.CopyObject(context.Background(), dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - } - - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: objectName, - ReplaceMetadata: true, - UserMetadata: map[string]string{ - "Copy": "should be same", - }, - } - args["dst"] = dst - args["src"] = src - - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject shouldn't fail", err) - return - } - - oi, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - 
logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - stOpts := minio.StatObjectOptions{} - stOpts.SetMatchETag(oi.ETag) - objInfo, err = c.StatObject(context.Background(), bucketName, objectName, stOpts) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) - return - } - - if objInfo.Metadata.Get("x-amz-meta-copy") != "should be same" { - logError(testName, function, args, startTime, "", "CopyObject modified metadata should match", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderSeeker interface methods. -func testSSECEncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - }() - - // Generate 129MiB of data. - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This following function helps us to compare data 
from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected success. 
- logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderSeeker interface methods. -func testSSES3EncryptedGetObjectReadSeekFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer func() { - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - }() - - // Generate 129MiB of data. - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat object failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - // This 
following function helps us to compare data from the reader after seek - // with the data from the original buffer - cmpData := func(r io.Reader, start, end int) { - if end-start == 0 { - return - } - buffer := bytes.NewBuffer([]byte{}) - if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "CopyN failed", err) - return - } - } - if !bytes.Equal(buf[start:end], buffer.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - } - - testCases := []struct { - offset int64 - whence int - pos int64 - err error - shouldCmp bool - start int - end int - }{ - // Start from offset 0, fetch data and compare - {0, 0, 0, nil, true, 0, 0}, - // Start from offset 2048, fetch data and compare - {2048, 0, 2048, nil, true, 2048, bufSize}, - // Start from offset larger than possible - {int64(bufSize) + 1024, 0, 0, io.EOF, false, 0, 0}, - // Move to offset 0 without comparing - {0, 0, 0, nil, false, 0, 0}, - // Move one step forward and compare - {1, 1, 1, nil, true, 1, bufSize}, - // Move larger than possible - {int64(bufSize), 1, 0, io.EOF, false, 0, 0}, - // Provide negative offset with CUR_SEEK - {int64(-1), 1, 0, fmt.Errorf("Negative position not allowed for 1"), false, 0, 0}, - // Test with whence SEEK_END and with positive offset - {1024, 2, 0, io.EOF, false, 0, 0}, - // Test with whence SEEK_END and with negative offset - {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize}, - // Test with whence SEEK_END and with large negative offset - {-int64(bufSize) * 2, 2, 0, fmt.Errorf("Seeking at negative offset not allowed for 2"), false, 0, 0}, - // Test with invalid whence - {0, 3, 0, fmt.Errorf("Invalid whence 3"), false, 0, 0}, - } - - for i, testCase := range testCases { - // Perform seek operation - n, err := r.Seek(testCase.offset, testCase.whence) - if err != nil && testCase.err == nil { - // We expected 
success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err == nil && testCase.err != nil { - // We expected failure, but got success. - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - if err != nil && testCase.err != nil { - if err.Error() != testCase.err.Error() { - // We expect a specific error - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, unexpected err value: expected: %s, found: %s", i+1, testCase.err, err), err) - return - } - } - // Check the returned seek pos - if n != testCase.pos { - logError(testName, function, args, startTime, "", - fmt.Sprintf("Test %d, number of bytes seeked does not match, expected %d, got %d", i+1, testCase.pos, n), err) - return - } - // Compare only if shouldCmp is activated - if testCase.shouldCmp { - cmpData(r, testCase.start, testCase.end) - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-C get object ReaderAt interface methods. -func testSSECEncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 129MiB of data. - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ - ServerSideEncryption: encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. 
- m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", 
"ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, len(buf)) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, len(buf)+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests SSE-S3 get object ReaderAt interface methods. -func testSSES3EncryptedGetObjectReadAtFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 129MiB of data. - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - ServerSideEncryption: encrypt.NewSSE(), - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - defer r.Close() - - offset := int64(2048) - - // read directly - buf1 := make([]byte, 512) - buf2 := 
make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - // Test readAt before stat is called such that objectInfo doesn't change. - m, err := r.ReadAt(buf1, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf1) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err) - return - } - if !bytes.Equal(buf1, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err) - return - } - - m, err = r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same 
offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, len(buf)) - // Read the whole object. - m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, len(buf)+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// testSSECEncryptionPutGet tests encryption with customer provided encryption keys -func testSSECEncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(testCase.buf), int64(len(testCase.buf)), 
minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - successLogger(testName, function, args, startTime).Info() -} - -// TestEncryptionFPut tests encryption with customer specified encryption keys -func testSSECEncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - const password = "correct horse battery staple" // https://xkcd.com/936/ - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - args["sse"] = sse - - // Generate a 
random file name. - fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - os.Remove(fileName) - } - - successLogger(testName, function, args, startTime).Info() -} - -// testSSES3EncryptionPutGet tests SSE-S3 encryption -func testSSES3EncryptionPutGet() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() 
- function := "PutEncryptedObject(bucketName, objectName, reader, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "sse": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Put encrypted data - _, err = c.PutObject(context.Background(), bucketName, objectName, 
bytes.NewReader(testCase.buf), int64(len(testCase.buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err) - return - } - - // Read the data back without any encryption headers - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - successLogger(testName, function, args, startTime).Info() - - } - - successLogger(testName, function, args, startTime).Info() -} - -// TestSSES3EncryptionFPut tests server side encryption -func testSSES3EncryptionFPut() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, sse)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "filePath": "", - "contentType": "", - "sse": "", - } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Object custom metadata - customContentType := "custom/contenttype" - args["metadata"] = customContentType - - testCases := []struct { - buf []byte - }{ - {buf: bytes.Repeat([]byte("F"), 0)}, - {buf: bytes.Repeat([]byte("F"), 1)}, - {buf: bytes.Repeat([]byte("F"), 15)}, - {buf: bytes.Repeat([]byte("F"), 16)}, - {buf: bytes.Repeat([]byte("F"), 17)}, - {buf: bytes.Repeat([]byte("F"), 31)}, - {buf: bytes.Repeat([]byte("F"), 32)}, - {buf: bytes.Repeat([]byte("F"), 33)}, - {buf: bytes.Repeat([]byte("F"), 1024)}, - {buf: bytes.Repeat([]byte("F"), 1024*2)}, - {buf: bytes.Repeat([]byte("F"), 1024*1024)}, - } - - for i, testCase := range testCases { - // Generate a random object name - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Secured object - sse := encrypt.NewSSE() - args["sse"] = sse - - // Generate a random file name. 
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - _, err = file.Write(testCase.buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - file.Close() - // Put encrypted data - if _, err = c.FPutObject(context.Background(), bucketName, objectName, fileName, minio.PutObjectOptions{ServerSideEncryption: sse}); err != nil { - logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err) - return - } - defer r.Close() - - // Compare the sent object with the received one - recvBuffer := bytes.NewBuffer([]byte{}) - if _, err = io.Copy(recvBuffer, r); err != nil { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err) - return - } - if recvBuffer.Len() != len(testCase.buf) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err) - return - } - if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) { - logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err) - return - } - - os.Remove(fileName) - } - - successLogger(testName, function, args, startTime).Info() -} - -func testBucketNotification() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "SetBucketNotification(bucketName)" - args := map[string]interface{}{ - "bucketName": 
"", - } - - if os.Getenv("NOTIFY_BUCKET") == "" || - os.Getenv("NOTIFY_SERVICE") == "" || - os.Getenv("NOTIFY_REGION") == "" || - os.Getenv("NOTIFY_ACCOUNTID") == "" || - os.Getenv("NOTIFY_RESOURCE") == "" { - ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info() - return - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - bucketName := os.Getenv("NOTIFY_BUCKET") - args["bucketName"] = bucketName - - topicArn := notification.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE")) - queueArn := notification.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource") - - topicConfig := notification.NewConfig(topicArn) - topicConfig.AddEvents(notification.ObjectCreatedAll, notification.ObjectRemovedAll) - topicConfig.AddFilterSuffix("jpg") - - queueConfig := notification.NewConfig(queueArn) - queueConfig.AddEvents(notification.ObjectCreatedAll) - queueConfig.AddFilterPrefix("photos/") - - config := notification.Configuration{} - config.AddTopic(topicConfig) - - // Add the same topicConfig again, should have no effect - // because it is duplicated - config.AddTopic(topicConfig) - if len(config.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Duplicate entry added", err) - return - } - - // Add and remove a queue config - config.AddQueue(queueConfig) - config.RemoveQueueByArn(queueArn) - - err = 
c.SetBucketNotification(context.Background(), bucketName, config) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketNotification failed", err) - return - } - - config, err = c.GetBucketNotification(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketNotification failed", err) - return - } - - if len(config.TopicConfigs) != 1 { - logError(testName, function, args, startTime, "", "Topic config is empty", err) - return - } - - if config.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" { - logError(testName, function, args, startTime, "", "Couldn't get the suffix", err) - return - } - - err = c.RemoveAllBucketNotification(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests comprehensive list of all methods. -func testFunctional() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "testFunctional()" - functionAll := "" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, nil, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket. - function = "MakeBucket(bucketName, region)" - functionAll = "MakeBucket(bucketName, region)" - args["bucketName"] = bucketName - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - - defer cleanupBucket(bucketName, c) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - // Generate a random file name. - fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File creation failed", err) - return - } - for i := 0; i < 3; i++ { - buf := make([]byte, rand.Intn(1<<19)) - _, err = file.Write(buf) - if err != nil { - logError(testName, function, args, startTime, "", "File write failed", err) - return - } - } - file.Close() - - // Verify if bucket exits and you have access. - var exists bool - function = "BucketExists(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExists(context.Background(), bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "BucketExists failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find the bucket", err) - return - } - - // Asserting the default bucket policy. 
- function = "GetBucketPolicy(ctx, bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - nilPolicy, err := c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - if nilPolicy != "" { - logError(testName, function, args, startTime, "", "policy should be set to nil", err) - return - } - - // Set the bucket policy to 'public readonly'. - function = "SetBucketPolicy(bucketName, readOnlyPolicy)" - functionAll += ", " + function - - readOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readOnlyPolicy, - } - - err = c.SetBucketPolicy(context.Background(), bucketName, readOnlyPolicy) - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readonly`. - function = "GetBucketPolicy(ctx, bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public writeonly'. 
- function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" - functionAll += ", " + function - - writeOnlyPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": writeOnlyPolicy, - } - err = c.SetBucketPolicy(context.Background(), bucketName, writeOnlyPolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `writeonly`. - function = "GetBucketPolicy(ctx, bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - - _, err = c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // Make the bucket 'public read/write'. - function = "SetBucketPolicy(bucketName, readWritePolicy)" - functionAll += ", " + function - - readWritePolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Resource":["arn:aws:s3:::` + bucketName + `"]}]}` - - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readWritePolicy, - } - err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - // should return policy `readwrite`. - function = "GetBucketPolicy(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - _, err = c.GetBucketPolicy(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) - return - } - - // List all buckets. 
- function = "ListBuckets()" - functionAll += ", " + function - args = nil - buckets, err := c.ListBuckets(context.Background()) - - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "Found bucket list to be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBuckets failed", err) - return - } - - // Verify if previously created bucket is listed in list buckets. - bucketFound := false - for _, bucket := range buckets { - if bucket.Name == bucketName { - bucketFound = true - } - } - - // If bucket not found error out. - if !bucketFound { - logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err) - return - } - - objectName := bucketName + "unique" - - // Generate data - buf := bytes.Repeat([]byte("f"), 1<<19) - - function = "PutObject(bucketName, objectName, reader, contentType)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": "", - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-nolength", - "contentType": "binary/octet-stream", - } - - _, err = c.PutObject(context.Background(), bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Instantiate a done channel to close all listing. - doneCh := make(chan struct{}) - defer close(doneCh) - - objFound := false - isRecursive := true // Recursive is true. 
- - function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: true}) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - objFound = false - isRecursive = true // Recursive is true. - function = "ListObjects()" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{Prefix: objectName, Recursive: isRecursive}) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err) - return - } - - incompObjNotFound := true - - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - - for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(context.Background(), bucketName, objectName, 
minio.GetObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err := ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err) - return - } - newReader.Close() - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "FGetObject failed", err) - return - } - - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - if _, err = c.PresignedHeadObject(context.Background(), bucketName, "", 3600*time.Second, nil); err == nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject success", err) - return - } - - // Generate presigned HEAD object url. 
- function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively canceled after 30secs - // with no response. - Timeout: 30 * time.Second, - Transport: transport, - } - - req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject request was incorrect", err) - return - } - - // Verify if presigned url works. 
- resp, err := httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err) - return - } - if resp.Header.Get("ETag") == "" { - logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err) - return - } - resp.Body.Close() - - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedGetObject(context.Background(), bucketName, "", 3600*time.Second, nil) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedGetObject success", err) - return - } - - // Generate presigned GET object url. - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. 
- req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - resp.Body.Close() - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - - // Set request parameters. - reqParams := make(url.Values) - reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - "reqParams": reqParams, - } - presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) - - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. 
- req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err) - return - } - if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": "", - "expires": 3600 * time.Second, - } - _, err = c.PresignedPutObject(context.Background(), bucketName, "", 3600*time.Second) - if err == nil { - logError(testName, function, args, startTime, "", "PresignedPutObject success", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) - - if err != nil { - logError(testName, function, args, startTime, "", 
"PresignedPutObject failed", err) - return - } - - buf = bytes.Repeat([]byte("g"), 1<<19) - - req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err) - return - } - - newReadBytes, err = ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err) - return - } - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "RemoveObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - err = c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - args["objectName"] = objectName + "-f" - err = c.RemoveObject(context.Background(), bucketName, objectName+"-f", minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + "-nolength" - err = c.RemoveObject(context.Background(), bucketName, objectName+"-nolength", minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - args["objectName"] = objectName + 
"-presigned" - err = c.RemoveObject(context.Background(), bucketName, objectName+"-presigned", minio.RemoveObjectOptions{}) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveObject failed", err) - return - } - - function = "RemoveBucket(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - err = c.RemoveBucket(context.Background(), bucketName) - - if err != nil { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - err = c.RemoveBucket(context.Background(), bucketName) - if err == nil { - logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err) - return - } - if err.Error() != "The specified bucket does not exist" { - logError(testName, function, args, startTime, "", "RemoveBucket failed", err) - return - } - - os.Remove(fileName) - os.Remove(fileName + "-f") - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test for validating GetObject Reader* methods functioning when the -// object is modified in the object store. -func testGetObjectModified() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload an object. - objectName := "myobject" - args["objectName"] = objectName - content := "helloworld" - _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - defer c.RemoveObject(context.Background(), bucketName, objectName, minio.RemoveObjectOptions{}) - - reader, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err) - return - } - defer reader.Close() - - // Read a few bytes of the object. - b := make([]byte, 5) - n, err := reader.ReadAt(b, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err) - return - } - - // Upload different contents to the same object while object is being read. - newContent := "goodbyeworld" - _, err = c.PutObject(context.Background(), bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"}) - if err != nil { - logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err) - return - } - - // Confirm that a Stat() call in between doesn't change the Object's cached etag. 
- _, err = reader.Stat() - expectedError := "At least one of the pre-conditions you specified did not hold" - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - - // Read again only to find object contents have been modified since last read. - _, err = reader.ReadAt(b, int64(n)) - if err.Error() != expectedError { - logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject to upload a file seeked at a given offset. -func testPutObjectUploadSeekedObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, fileToUpload, contentType)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileToUpload": "", - "contentType": "binary/octet-stream", - } - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, c) - - var tempfile *os.File - - if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" { - tempfile, err = os.Open(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "File open failed", err) - return - } - args["fileToUpload"] = fileName - } else { - tempfile, err = ioutil.TempFile("", "minio-go-upload-test-") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile create failed", err) - return - } - args["fileToUpload"] = tempfile.Name() - - // Generate 100kB data - if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil { - logError(testName, function, args, startTime, "", "File copy failed", err) - return - } - - defer os.Remove(tempfile.Name()) - - // Seek back to the beginning of the file. 
- tempfile.Seek(0, 0) - } - var length = 100 * humanize.KiByte - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - offset := length / 2 - if _, err = tempfile.Seek(int64(offset), 0); err != nil { - logError(testName, function, args, startTime, "", "TempFile seek failed", err) - return - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - tempfile.Close() - - obj, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer obj.Close() - - n, err := obj.Seek(int64(offset), 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != int64(offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err) - return - } - - _, err = c.PutObject(context.Background(), bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName+"getobject", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if st.Size != int64(length-offset) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests bucket re-create errors. 
-func testMakeBucketErrorV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - region := "eu-west-1" - args["bucketName"] = bucketName - args["region"] = region - - // Make a new bucket in 'eu-west-1'. - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: region}); err == nil { - logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err) - return - } - // Verify valid error response from server. - if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" && - minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" { - logError(testName, function, args, startTime, "", "Invalid error returned by server", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object reader to not throw error on being closed twice. 
-func testGetObjectClosedTwiceV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. 
- bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if err := r.Close(); err == nil { - logError(testName, function, args, startTime, "", "Object is already closed, should return error", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests FPutObject hidden contentType setting -func testFPutObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FPutObject(bucketName, objectName, fileName, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "fileName": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Make a temp file with 11*1024*1024 bytes of data. - file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest") - if err != nil { - logError(testName, function, args, startTime, "", "TempFile creation failed", err) - return - } - - r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024)) - n, err := io.CopyN(file, r, 11*1024*1024) - if err != nil { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - if n != int64(11*1024*1024) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(11*1024*1024))+" got "+string(n), err) - return - } - - // Close the file pro-actively for windows. 
- err = file.Close() - if err != nil { - logError(testName, function, args, startTime, "", "File close failed", err) - return - } - - // Set base object name - objectName := bucketName + "FPutObject" - args["objectName"] = objectName - args["fileName"] = file.Name() - - // Perform standard FPutObject with contentType provided (Expecting application/octet-stream) - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/octet-stream) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Add extension to temp file name - fileName := file.Name() - err = os.Rename(fileName, fileName+".gtar") - if err != nil { - logError(testName, function, args, startTime, "", "Rename failed", err) - return - } - - // Perform FPutObject with no contentType provided (Expecting application/x-gtar) - args["objectName"] = objectName + "-Octet" - args["contentType"] = "" - args["fileName"] = fileName + ".gtar" - - _, err = c.FPutObject(context.Background(), bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FPutObject failed", err) - return - } - - // Check headers and sizes - rStandard, err := c.StatObject(context.Background(), bucketName, objectName+"-standard", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if rStandard.Size != 11*1024*1024 { - logError(testName, 
function, args, startTime, "", "Unexpected size", nil) - return - } - - if rStandard.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err) - return - } - - rOctet, err := c.StatObject(context.Background(), bucketName, objectName+"-Octet", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rOctet.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err) - return - } - - if rOctet.Size != 11*1024*1024 { - logError(testName, function, args, startTime, "", "Unexpected size", nil) - return - } - - rGTar, err := c.StatObject(context.Background(), bucketName, objectName+"-GTar", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if rGTar.Size != 11*1024*1024 { - logError(testName, function, args, startTime, "", "Unexpected size", nil) - return - } - if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { - logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) - return - } - - os.Remove(fileName + ".gtar") - successLogger(testName, function, args, startTime).Info() -} - -// Tests various bucket supported formats. -func testMakeBucketRegionsV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "MakeBucket(bucketName, region)" - args := map[string]interface{}{ - "bucketName": "", - "region": "eu-west-1", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket in 'eu-central-1'. - if err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "eu-west-1"}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - if err = cleanupBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) - return - } - - // Make a new bucket with '.' in its name, in 'us-west-2'. This - // request is internally staged into a path style instead of - // virtual host style. - if err = c.MakeBucket(context.Background(), bucketName+".withperiod", minio.MakeBucketOptions{Region: "us-west-2"}); err != nil { - args["bucketName"] = bucketName + ".withperiod" - args["region"] = "us-west-2" - logError(testName, function, args, startTime, "", "MakeBucket test with a bucket name with period, '.', failed", err) - return - } - - // Delete all objects and buckets - if err = cleanupBucket(bucketName+".withperiod", c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed while removing bucket recursively", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderSeeker interface methods. 
-func testGetObjectReadSeekFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data. 
- _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - n, err := r.Seek(offset, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) - return - } - n, err = r.Seek(0, 1) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err) - return - } - _, err = r.Seek(offset, 2) - if err == nil { - logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err) - return - } - n, err = r.Seek(-offset, 2) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != st.Size-offset { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err) - return - } - - var 
buffer1 bytes.Buffer - if _, err = io.CopyN(&buffer1, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - } - if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - // Seek again and read again. - n, err = r.Seek(offset-1, 0) - if err != nil { - logError(testName, function, args, startTime, "", "Seek failed", err) - return - } - if n != (offset - 1) { - logError(testName, function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err) - return - } - - var buffer2 bytes.Buffer - if _, err = io.CopyN(&buffer2, r, st.Size); err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "Copy failed", err) - return - } - } - // Verify now lesser bytes. - if !bytes.Equal(buf[2047:], buffer2.Bytes()) { - logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests get object ReaderAt interface methods. -func testGetObjectReadAtFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(bucketName, objectName)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - buf, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - - // Save the data - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer r.Close() - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - - offset := int64(2048) - - // Read directly - buf2 := make([]byte, 512) - buf3 := make([]byte, 512) - buf4 := make([]byte, 512) - - m, err := r.ReadAt(buf2, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt 
failed", err) - return - } - if m != len(buf2) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err) - return - } - if !bytes.Equal(buf2, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf3, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf3) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err) - return - } - if !bytes.Equal(buf3, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - offset += 512 - m, err = r.ReadAt(buf4, offset) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - if m != len(buf4) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err) - return - } - if !bytes.Equal(buf4, buf[offset:offset+512]) { - logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err) - return - } - - buf5 := make([]byte, bufSize) - // Read the whole object. 
- m, err = r.ReadAt(buf5, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - if m != len(buf5) { - logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err) - return - } - if !bytes.Equal(buf, buf5) { - logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err) - return - } - - buf6 := make([]byte, bufSize+1) - // Read the whole object and beyond. - _, err = r.ReadAt(buf6, 0) - if err != nil { - if err != io.EOF { - logError(testName, function, args, startTime, "", "ReadAt failed", err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Tests copy object -func testCopyObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, c) - - // Make a new bucket in 'us-east-1' (destination bucket). - err = c.MakeBucket(context.Background(), bucketName+"-copy", minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName+"-copy", c) - - // Generate 33K of data. - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - r, err := c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - r.Close() - - // Copy Source - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - MatchModifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - MatchETag: objInfo.ETag, - } - args["source"] = src - - // Set copy conditions. 
- dst := minio.CopyDestOptions{ - Bucket: bucketName + "-copy", - Object: objectName + "-copy", - } - args["destination"] = dst - - // Perform the Copy - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Source object - r, err = c.GetObject(context.Background(), bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Destination object - readerCopy, err := c.GetObject(context.Background(), bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - // Check the various fields of source object against destination object. - objInfo, err = r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - objInfoCopy, err := readerCopy.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - if objInfo.Size != objInfoCopy.Size { - logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err) - return - } - - // Close all the readers. 
- r.Close() - readerCopy.Close() - - // CopyObject again but with wrong conditions - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - MatchUnmodifiedSince: time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC), - NoMatchETag: objInfo.ETag, - } - - // Perform the Copy which should fail - _, err = c.CopyObject(context.Background(), dst, src) - if err == nil { - logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testComposeObjectErrorCasesWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Test that more than 10K source objects cannot be - // concatenated. - srcArr := [10001]minio.CopySrcOptions{} - srcSlice := srcArr[:] - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "object", - } - - args["destination"] = dst - // Just explain about srcArr in args["sourceList"] - // to stop having 10,001 null headers logged - args["sourceList"] = "source array of 10,001 elements" - if _, err := c.ComposeObject(context.Background(), dst, srcSlice...); err == nil { - logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err) - return - } else if err.Error() != "There must be as least one and up to 10000 source objects." 
{ - logError(testName, function, args, startTime, "", "Got unexpected error", err) - return - } - - // Create a source with invalid offset spec and check that - // error is returned: - // 1. Create the source object. - const badSrcSize = 5 * 1024 * 1024 - buf := bytes.Repeat([]byte("1"), badSrcSize) - _, err = c.PutObject(context.Background(), bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - // 2. Set invalid range spec on the object (going beyond - // object size) - badSrc := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "badObject", - MatchRange: true, - Start: 1, - End: badSrcSize, - } - - // 3. ComposeObject call should fail. - if _, err := c.ComposeObject(context.Background(), dst, badSrc); err == nil { - logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err) - return - } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") { - logError(testName, function, args, startTime, "", "Got invalid error", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test expected error cases -func testComposeObjectErrorCasesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - testComposeObjectErrorCasesWrapper(c) -} - -func testComposeMultipleSources(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName 
:= getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{ - "destination": "", - "sourceList": "", - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Upload a small source object - const srcSize = 1024 * 1024 * 5 - buf := bytes.Repeat([]byte("1"), srcSize) - _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // We will append 10 copies of the object. - srcs := []minio.CopySrcOptions{} - for i := 0; i < 10; i++ { - srcs = append(srcs, minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - }) - } - - // make the last part very small - srcs[9].MatchRange = true - - args["sourceList"] = srcs - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject", - } - args["destination"] = dst - - ui, err := c.ComposeObject(context.Background(), dst, srcs...) 
- if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - if ui.Size != 9*srcSize+1 { - logError(testName, function, args, startTime, "", "ComposeObject returned unexpected size", err) - return - } - - objProps, err := c.StatObject(context.Background(), bucketName, "dstObject", minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if objProps.Size != 9*srcSize+1 { - logError(testName, function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test concatenating multiple 10K objects V2 -func testCompose10KSourcesV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - testComposeMultipleSources(c) -} - -func testEncryptedEmptyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, objectSize, opts)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - 
return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - sse := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"object")) - - // 1. create an sse-c encrypted object to copy by uploading - const srcSize = 0 - var buf []byte // Empty buffer - args["objectName"] = "object" - _, err = c.PutObject(context.Background(), bucketName, "object", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ServerSideEncryption: sse}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - // 2. Test CopyObject for an empty object - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "object", - Encryption: sse, - } - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "new-object", - Encryption: sse, - } - - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - function = "CopyObject(dst, src)" - logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject failed", err) - return - } - - // 3. 
Test Key rotation - newSSE := encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"new-object")) - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: "new-object", - Encryption: sse, - } - - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "new-object", - Encryption: newSSE, - } - - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - function = "CopyObject(dst, src)" - logError(testName, function, map[string]interface{}{}, startTime, "", "CopyObject with key rotation failed", err) - return - } - - // 4. Download the object. - reader, err := c.GetObject(context.Background(), bucketName, "new-object", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, map[string]interface{}{}, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, map[string]interface{}{}, startTime, "", "Downloaded object doesn't match the empty encrypted object", err) - return - } - - delete(args, "objectName") - successLogger(testName, function, args, startTime).Info() -} - -func testEncryptedCopyObjectWrapper(c *minio.Client, bucketName string, sseSrc, sseDst encrypt.ServerSide) { - // initialize logging params - startTime := time.Now() - testName := getFuncNameLoc(2) - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - var srcEncryption, dstEncryption encrypt.ServerSide - - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // 1. 
create an sse-c encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - _, err = c.PutObject(context.Background(), bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ - ServerSideEncryption: sseSrc, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - if sseSrc != nil && sseSrc.Type() != encrypt.S3 { - srcEncryption = sseSrc - } - - // 2. copy object and change encryption key - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - Encryption: srcEncryption, - } - args["source"] = src - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject", - Encryption: sseDst, - } - args["destination"] = dst - - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - if sseDst != nil && sseDst.Type() != encrypt.S3 { - dstEncryption = sseDst - } - // 3. get copied object and check if content is equal - coreClient := minio.Core{c} - reader, _, _, err := coreClient.GetObject(context.Background(), bucketName, "dstObject", minio.GetObjectOptions{ServerSideEncryption: dstEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err := ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - - // Test key rotation for source object in-place. 
- var newSSE encrypt.ServerSide - if sseSrc != nil && sseSrc.Type() == encrypt.SSEC { - newSSE = encrypt.DefaultPBKDF([]byte("Don't Panic"), []byte(bucketName+"srcObject")) // replace key - } - if sseSrc != nil && sseSrc.Type() == encrypt.S3 { - newSSE = encrypt.NewSSE() - } - if newSSE != nil { - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "srcObject", - Encryption: newSSE, - } - args["destination"] = dst - - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - // Get copied object and check if content is equal - reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{ServerSideEncryption: newSSE}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - decBytes, err = ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - reader.Close() - - // Test in-place decryption. 
- dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "srcObject", - } - args["destination"] = dst - - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - Encryption: newSSE, - } - args["source"] = src - _, err = c.CopyObject(context.Background(), dst, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject Key rotation failed", err) - return - } - } - - // Get copied decrypted object and check if content is equal - reader, _, _, err = coreClient.GetObject(context.Background(), bucketName, "srcObject", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - defer reader.Close() - - decBytes, err = ioutil.ReadAll(reader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(decBytes, buf) { - logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test encrypted copy object -func testUnencryptedToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, nil, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc encrypt.ServerSide - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testUnencryptedToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - var sseSrc, sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSECToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSECCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToSSES3CopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - sseDst := encrypt.NewSSE() - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedSSES3ToUnencryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.NewSSE() - var sseDst encrypt.ServerSide - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -// Test encrypted copy object -func testEncryptedCopyObjectV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - - sseSrc := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"srcObject")) - sseDst := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+"dstObject")) - // c.TraceOn(os.Stderr) - testEncryptedCopyObjectWrapper(c, bucketName, sseSrc, sseDst) -} - -func testDecryptedCopyObject() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v2 client object creation failed", err) - return - } - - bucketName, objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-"), "object" - if err = 
c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}); err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - encryption := encrypt.DefaultPBKDF([]byte("correct horse battery staple"), []byte(bucketName+objectName)) - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(bytes.Repeat([]byte("a"), 1024*1024)), 1024*1024, minio.PutObjectOptions{ - ServerSideEncryption: encryption, - }) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: objectName, - Encryption: encrypt.SSECopy(encryption), - } - args["source"] = src - - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "decrypted-" + objectName, - } - args["destination"] = dst - - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - if _, err = c.GetObject(context.Background(), bucketName, "decrypted-"+objectName, minio.GetObjectOptions{}); err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - successLogger(testName, function, args, startTime).Info() -} - -func testSSECMultipartEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. 
- c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 6MB of data - buf := bytes.Repeat([]byte("abcdef"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - // Upload a 6MB object using multipart mechanism - uploadID, err := c.NewMultipartUpload(context.Background(), bucketName, objectName, minio.PutObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - var completeParts []minio.CompletePart - - part, err := c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 1, bytes.NewReader(buf[:5*1024*1024]), 5*1024*1024, "", "", srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - return - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: part.PartNumber, ETag: part.ETag}) - - part, err = c.PutObjectPart(context.Background(), bucketName, objectName, uploadID, 2, bytes.NewReader(buf[5*1024*1024:]), 1024*1024, "", "", srcencryption) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectPart call failed", err) - return - } - completeParts = append(completeParts, minio.CompletePart{PartNumber: 
part.PartNumber, ETag: part.ETag}) - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), bucketName, objectName, uploadID, completeParts, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err = c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = objInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err = c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (6*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := 
minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 6*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 6*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 6MB", err) - return - } - - getOpts.SetRange(6*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 6*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:6*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 6MB", err) - return - } - if getBuf[6*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation -func testSSECEncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - ServerSideEncryption: srcencryption, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, 
followed by first byte of - // `objectName`. - metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we 
read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to unencrypted copy -func testSSECEncryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - ServerSideEncryption: srcencryption, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - var dstencryption encrypt.ServerSide - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := 
c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSEC encrypted to SSE-S3 encrypted copy -func testSSECEncryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - ServerSideEncryption: srcencryption, - } - - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - encrypt.SSECopy(srcencryption).Marshal(header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 
5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSEC encryption copy part -func testUnencryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - 
getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy -func testUnencryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - putmetadata := map[string]string{ - "Content-Type": "binary/octet-stream", - } - opts := minio.PutObjectOptions{ - UserMetadata: putmetadata, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, 
destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy -func testUnencryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := 
c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for SSE-S3 to SSEC encryption copy part -func testSSES3EncryptedToSSECCopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - password := "correct horse battery staple" - srcEncryption := encrypt.NewSSE() - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - ServerSideEncryption: srcEncryption, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} - 
getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to unencrypted copy -func testSSES3EncryptedToUnencryptedCopyPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - srcEncryption := encrypt.NewSSE() - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - ServerSideEncryption: srcEncryption, - } - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := c.GetObject(context.Background(), destBucketName, 
destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} - -// Test Core CopyObjectPart implementation for unencrypted to SSE-S3 encrypted copy -func testSSES3EncryptedToSSES3CopyObjectPart() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObjectPart(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - client, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Instantiate new core client object. - c := minio.Core{client} - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, client) - // Make a buffer with 5MB of data - buf := bytes.Repeat([]byte("abcde"), 1024*1024) - - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - srcEncryption := encrypt.NewSSE() - opts := minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Content-Type": "binary/octet-stream", - }, - ServerSideEncryption: srcEncryption, - } - - uploadInfo, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", opts) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{ServerSideEncryption: srcEncryption}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), st.Size), err) - return - } - - destBucketName := bucketName - destObjectName := objectName + "-dest" - dstencryption := encrypt.NewSSE() - - uploadID, err := c.NewMultipartUpload(context.Background(), destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) - if err != nil { - logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) - return - } - - // Content of the destination object will be two copies of - // `objectName` concatenated, followed by first byte of - // `objectName`. 
- metadata := make(map[string]string) - header := make(http.Header) - dstencryption.Marshal(header) - - for k, v := range header { - metadata[k] = v[0] - } - - metadata["x-amz-copy-source-if-match"] = uploadInfo.ETag - - // First of three parts - fstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Second of three parts - sndPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Last of three parts - lstPart, err := c.CopyObjectPart(context.Background(), bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) - return - } - - // Complete the multipart upload - _, err = c.CompleteMultipartUpload(context.Background(), destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) - return - } - - // Stat the object and check its length matches - objInfo, err := c.StatObject(context.Background(), destBucketName, destObjectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject call failed", err) - return - } - - if objInfo.Size != (5*1024*1024)*2+1 { - logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) - return - } - - // Now we read the data back - getOpts := minio.GetObjectOptions{} - getOpts.SetRange(0, 5*1024*1024-1) - r, _, _, err := 
c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf := make([]byte, 5*1024*1024) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf, buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) - return - } - - getOpts.SetRange(5*1024*1024, 0) - r, _, _, err = c.GetObject(context.Background(), destBucketName, destObjectName, getOpts) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject call failed", err) - return - } - getBuf = make([]byte, 5*1024*1024+1) - _, err = readFull(r, getBuf) - if err != nil { - logError(testName, function, args, startTime, "", "Read buffer failed", err) - return - } - if !bytes.Equal(getBuf[:5*1024*1024], buf) { - logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) - return - } - if getBuf[5*1024*1024] != buf[0] { - logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) - return - } - - successLogger(testName, function, args, startTime).Info() - - // Do not need to remove destBucketName its same as bucketName. 
-} -func testUserMetadataCopying() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testUserMetadataCopyingWrapper(c *minio.Client) { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - // Make a new bucket in 'us-east-1' (source bucket). - err := c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - h.Add(k, vs[0]) - } - } - return h - } - - // 1. 
create a client encrypted object to copy by uploading - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB - metadata := make(http.Header) - metadata.Set("x-amz-meta-myheader", "myvalue") - m := make(map[string]string) - m["x-amz-meta-myheader"] = "myvalue" - _, err = c.PutObject(context.Background(), bucketName, "srcObject", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err) - return - } - if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 2. create source - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObject", - } - - // 2.1 create destination with metadata set - dst1 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-1", - UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, - ReplaceMetadata: true, - } - - // 3. Check that copying to an object with metadata set resets - // the headers on the copy. - args["source"] = src - args["destination"] = dst1 - _, err = c.CopyObject(context.Background(), dst1, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders := make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 4. create destination with no metadata set and same source - dst2 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-2", - } - - // 5. Check that copying to an object with no metadata set, - // copies metadata. 
- args["source"] = src - args["destination"] = dst2 - _, err = c.CopyObject(context.Background(), dst2, src) - if err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed", err) - return - } - - expectedHeaders = metadata - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 6. Compose a pair of sources. - dst3 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-3", - ReplaceMetadata: true, - } - - function = "ComposeObject(destination, sources)" - args["source"] = []minio.CopySrcOptions{src, src} - args["destination"] = dst3 - _, err = c.ComposeObject(context.Background(), dst3, src, src) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - // 7. Compose a pair of sources with dest user metadata set. 
- dst4 := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "dstObject-4", - UserMetadata: map[string]string{"notmyheader": "notmyvalue"}, - ReplaceMetadata: true, - } - - function = "ComposeObject(destination, sources)" - args["source"] = []minio.CopySrcOptions{src, src} - args["destination"] = dst4 - _, err = c.ComposeObject(context.Background(), dst4, src, src) - if err != nil { - logError(testName, function, args, startTime, "", "ComposeObject failed", err) - return - } - - // Check that no headers are copied in this case - expectedHeaders = make(http.Header) - expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue") - if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testUserMetadataCopyingV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "CopyObject(destination, source)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // c.TraceOn(os.Stderr) - testUserMetadataCopyingWrapper(c) -} - -func testStorageClassMetadataPutObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassMetadataPutObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - 
logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - metadata := make(http.Header) - metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") - - emptyMetadata := make(http.Header) - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - - _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Get the returned metadata - returnedMeta := fetchMeta("srcObjectRRSClass") - - // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) - if !reflect.DeepEqual(metadata, returnedMeta) && !reflect.DeepEqual(emptyMetadata, returnedMeta) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - metadata = make(http.Header) - 
metadata.Set("x-amz-storage-class", "STANDARD") - - _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClass")) { - logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testStorageClassInvalidMetadataPutObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassInvalidMetadataPutObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 1MiB - - _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "INVALID_STORAGE_CLASS"}) - if err == nil { - logError(testName, function, args, startTime, "", "PutObject with invalid storage class passed, was expected to fail", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -func testStorageClassMetadataCopyObject() { - // initialize logging params - startTime := time.Now() - function := "testStorageClassMetadataCopyObject()" - args := map[string]interface{}{} - testName := getFuncName() - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO v4 client object creation failed", err) - return - } - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") - // Make a new bucket in 'us-east-1' (source bucket). 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - fetchMeta := func(object string) (h http.Header) { - objInfo, err := c.StatObject(context.Background(), bucketName, object, minio.StatObjectOptions{}) - args["bucket"] = bucketName - args["object"] = object - if err != nil { - logError(testName, function, args, startTime, "", "Stat failed", err) - return - } - h = make(http.Header) - for k, vs := range objInfo.Metadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-storage-class") { - for _, v := range vs { - h.Add(k, v) - } - } - } - return h - } - - metadata := make(http.Header) - metadata.Set("x-amz-storage-class", "REDUCED_REDUNDANCY") - - emptyMetadata := make(http.Header) - - const srcSize = 1024 * 1024 - buf := bytes.Repeat([]byte("abcde"), srcSize) - - // Put an object with RRS Storage class - _, err = c.PutObject(context.Background(), bucketName, "srcObjectRRSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "REDUCED_REDUNDANCY"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Make server side copy of object uploaded in previous step - src := minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObjectRRSClass", - } - dst := minio.CopyDestOptions{ - Bucket: bucketName, - Object: "srcObjectRRSClassCopy", - } - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed on RRS", err) - return - } - - // Get the returned metadata - returnedMeta := fetchMeta("srcObjectRRSClassCopy") - - // The response metada should either be equal to metadata (with REDUCED_REDUNDANCY) or emptyMetadata (in case of gateways) - if !reflect.DeepEqual(metadata, returnedMeta) && 
!reflect.DeepEqual(emptyMetadata, returnedMeta) { - logError(testName, function, args, startTime, "", "Metadata match failed", err) - return - } - - metadata = make(http.Header) - metadata.Set("x-amz-storage-class", "STANDARD") - - // Put an object with Standard Storage class - _, err = c.PutObject(context.Background(), bucketName, "srcObjectSSClass", - bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{StorageClass: "STANDARD"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Make server side copy of object uploaded in previous step - src = minio.CopySrcOptions{ - Bucket: bucketName, - Object: "srcObjectSSClass", - } - dst = minio.CopyDestOptions{ - Bucket: bucketName, - Object: "srcObjectSSClassCopy", - } - if _, err = c.CopyObject(context.Background(), dst, src); err != nil { - logError(testName, function, args, startTime, "", "CopyObject failed on SS", err) - return - } - // Fetch the meta data of copied object - if reflect.DeepEqual(metadata, fetchMeta("srcObjectSSClassCopy")) { - logError(testName, function, args, startTime, "", "Metadata verification failed, STANDARD storage class should not be a part of response metadata", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with size -1 byte object. -func testPutObjectNoLengthV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": -1, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - objectName := bucketName + "unique" - args["objectName"] = objectName - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - args["size"] = bufSize - - // Upload an object. - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, -1, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(bufSize)+" got "+string(st.Size), err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put objects of unknown size. 
-func testPutObjectsUnknownV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader,size,opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": "", - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Issues are revealed by trying to upload multiple files of unknown size - // sequentially (on 4GB machines) - for i := 1; i <= 4; i++ { - // Simulate that we could be receiving byte slices of data that we want - // to upload as a file - rpipe, wpipe := io.Pipe() - defer rpipe.Close() - go func() { - b := []byte("test") - wpipe.Write(b) - wpipe.Close() - }() - - // Upload the object. 
- objectName := fmt.Sprintf("%sunique%d", bucketName, i) - args["objectName"] = objectName - - ui, err := c.PutObject(context.Background(), bucketName, objectName, rpipe, -1, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err) - return - } - - if ui.Size != 4 { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(ui.Size), nil) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObjectStreaming failed", err) - return - } - - if st.Size != int64(4) { - logError(testName, function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(st.Size), err) - return - } - - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test put object with 0 byte object. -func testPutObject0ByteV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectName": "", - "size": 0, - "opts": "", - } - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - objectName := bucketName + "unique" - args["objectName"] = objectName - args["opts"] = minio.PutObjectOptions{} - - // Upload an object. - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err) - return - } - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObjectWithSize failed", err) - return - } - if st.Size != 0 { - logError(testName, function, args, startTime, "", "Expected upload object size 0 but got "+string(st.Size), err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test expected error cases -func testComposeObjectErrorCases() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - testComposeObjectErrorCasesWrapper(c) -} - -// Test concatenating multiple 10K objects V4 -func testCompose10KSources() { - // initialize logging params - startTime := 
time.Now() - testName := getFuncName() - function := "ComposeObject(destination, sourceList)" - args := map[string]interface{}{} - - // Instantiate new minio client object - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) - return - } - - testComposeMultipleSources(c) -} - -// Tests comprehensive list of all methods. -func testFunctionalV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "testFunctionalV2()" - functionAll := "" - args := map[string]interface{}{} - - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable to debug - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - location := "us-east-1" - // Make a new bucket. - function = "MakeBucket(bucketName, location)" - functionAll = "MakeBucket(bucketName, location)" - args = map[string]interface{}{ - "bucketName": bucketName, - "location": location, - } - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: location}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - // Generate a random file name. 
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - file, err := os.Create(fileName) - if err != nil { - logError(testName, function, args, startTime, "", "file create failed", err) - return - } - for i := 0; i < 3; i++ { - buf := make([]byte, rand.Intn(1<<19)) - _, err = file.Write(buf) - if err != nil { - logError(testName, function, args, startTime, "", "file write failed", err) - return - } - } - file.Close() - - // Verify if bucket exits and you have access. - var exists bool - function = "BucketExists(bucketName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - } - exists, err = c.BucketExists(context.Background(), bucketName) - if err != nil { - logError(testName, function, args, startTime, "", "BucketExists failed", err) - return - } - if !exists { - logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err) - return - } - - // Make the bucket 'public read/write'. - function = "SetBucketPolicy(bucketName, bucketPolicy)" - functionAll += ", " + function - - readWritePolicy := `{"Version": "2012-10-17","Statement": [{"Action": ["s3:ListBucketMultipartUploads", "s3:ListBucket"],"Effect": "Allow","Principal": {"AWS": ["*"]},"Resource": ["arn:aws:s3:::` + bucketName + `"],"Sid": ""}]}` - - args = map[string]interface{}{ - "bucketName": bucketName, - "bucketPolicy": readWritePolicy, - } - err = c.SetBucketPolicy(context.Background(), bucketName, readWritePolicy) - - if err != nil { - logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) - return - } - - // List all buckets. 
- function = "ListBuckets()" - functionAll += ", " + function - args = nil - buckets, err := c.ListBuckets(context.Background()) - if len(buckets) == 0 { - logError(testName, function, args, startTime, "", "List buckets cannot be empty", err) - return - } - if err != nil { - logError(testName, function, args, startTime, "", "ListBuckets failed", err) - return - } - - // Verify if previously created bucket is listed in list buckets. - bucketFound := false - for _, bucket := range buckets { - if bucket.Name == bucketName { - bucketFound = true - } - } - - // If bucket not found error out. - if !bucketFound { - logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err) - return - } - - objectName := bucketName + "unique" - - // Generate data - buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19)) - - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "contentType": "", - } - _, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - st, err := c.StatObject(context.Background(), bucketName, objectName, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) - return - } - - objectNameNoLength := objectName + "-nolength" - args["objectName"] = objectNameNoLength - _, err = c.PutObject(context.Background(), bucketName, objectNameNoLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - st, err = 
c.StatObject(context.Background(), bucketName, objectNameNoLength, minio.StatObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "StatObject failed", err) - return - } - if st.Size != int64(len(buf)) { - logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(st.Size), err) - return - } - - // Instantiate a done channel to close all listing. - doneCh := make(chan struct{}) - defer close(doneCh) - - objFound := false - isRecursive := true // Recursive is true. - function = "ListObjects(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for obj := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Prefix: objectName, Recursive: isRecursive}) { - if obj.Key == objectName { - objFound = true - break - } - } - if !objFound { - logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err) - return - } - - incompObjNotFound := true - function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "isRecursive": isRecursive, - } - for objIncompl := range c.ListIncompleteUploads(context.Background(), bucketName, objectName, isRecursive) { - if objIncompl.Key != "" { - incompObjNotFound = false - break - } - } - if !incompObjNotFound { - logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - } - newReader, err := c.GetObject(context.Background(), bucketName, objectName, 
minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err := ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - function = "FGetObject(bucketName, objectName, fileName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "fileName": fileName + "-f", - } - err = c.FGetObject(context.Background(), bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FgetObject failed", err) - return - } - - // Generate presigned HEAD object url. - function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedHeadURL, err := c.PresignedHeadObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err) - return - } - - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - - httpClient := &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively canceled after 30secs - // with no response. 
- Timeout: 30 * time.Second, - Transport: transport, - } - - req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) - return - } - - // Verify if presigned url works. - resp, err := httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err) - return - } - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err) - return - } - if resp.Header.Get("ETag") == "" { - logError(testName, function, args, startTime, "", "Got empty ETag", err) - return - } - resp.Body.Close() - - // Generate presigned GET object url. - function = "PresignedGetObject(bucketName, objectName, expires, reqParams)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName, - "expires": 3600 * time.Second, - } - presignedGetURL, err := c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. 
- req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - resp.Body.Close() - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - // Set request parameters. - reqParams := make(url.Values) - reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"") - // Generate presigned GET object url. - args["reqParams"] = reqParams - presignedGetURL, err = c.PresignedGetObject(context.Background(), bucketName, objectName, 3600*time.Second, reqParams) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject failed", err) - return - } - - // Verify if presigned url works. 
- req, err = http.NewRequest(http.MethodGet, presignedGetURL.String(), nil) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject request incorrect", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err) - return - } - - if resp.StatusCode != http.StatusOK { - logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err) - return - } - newPresignedBytes, err = ioutil.ReadAll(resp.Body) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - if !bytes.Equal(newPresignedBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - // Verify content disposition. - if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" { - logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err) - return - } - - function = "PresignedPutObject(bucketName, objectName, expires)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - "expires": 3600 * time.Second, - } - presignedPutURL, err := c.PresignedPutObject(context.Background(), bucketName, objectName+"-presigned", 3600*time.Second) - if err != nil { - logError(testName, function, args, startTime, "", "PresignedPutObject failed", err) - return - } - - // Generate data more than 32K - buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024) - - req, err = http.NewRequest(http.MethodPut, presignedPutURL.String(), bytes.NewReader(buf)) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err) - return - } - - resp, err = httpClient.Do(req) - if err != nil { - logError(testName, function, args, startTime, "", "HTTP request to 
PresignedPutObject URL failed", err) - return - } - - function = "GetObject(bucketName, objectName)" - functionAll += ", " + function - args = map[string]interface{}{ - "bucketName": bucketName, - "objectName": objectName + "-presigned", - } - newReader, err = c.GetObject(context.Background(), bucketName, objectName+"-presigned", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - newReadBytes, err = ioutil.ReadAll(newReader) - if err != nil { - logError(testName, function, args, startTime, "", "ReadAll failed", err) - return - } - newReader.Close() - - if !bytes.Equal(newReadBytes, buf) { - logError(testName, function, args, startTime, "", "Bytes mismatch", err) - return - } - - os.Remove(fileName) - os.Remove(fileName + "-f") - successLogger(testName, functionAll, args, startTime).Info() -} - -// Test get object with GetObject with context -func testGetObjectContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - cancel() - - r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) - return - } - - if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) - return - } - r.Close() - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - defer cancel() - - // Read the data back - r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "object Stat call failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", "object Close() call 
failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with FGetObject with a user provided context -func testFGetObjectContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FGetObject(ctx, bucketName, objectName, fileName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "fileName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - fileName := "tempfile-context" - args["fileName"] = fileName - // Read the data back - err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err == nil { - logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) - return - } - if err = os.Remove(fileName + "-fcontext"); err != nil { - logError(testName, function, args, startTime, "", "Remove file failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with GetObject with a user provided context -func testGetObjectRanges() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(ctx, bucketName, objectName, fileName)" - args := 
map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "fileName": "", - } - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - - rng := rand.NewSource(time.Now().UnixNano()) - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rng, "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rng, "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - // Read the data back - tests := []struct { - start int64 - end int64 - }{ - { - start: 1024, - end: 1024 + 1<<20, - }, - { - start: 20e6, - end: 20e6 + 10000, - }, - { - start: 40e6, - end: 40e6 + 10000, - }, - { - start: 60e6, - end: 60e6 + 10000, - }, - { - start: 80e6, - end: 80e6 + 10000, - }, - { - start: 120e6, - end: int64(bufSize), - }, - } - for _, 
test := range tests { - wantRC := getDataReader("datafile-129-MB") - io.CopyN(ioutil.Discard, wantRC, test.start) - want := mustCrcReader(io.LimitReader(wantRC, test.end-test.start+1)) - opts := minio.GetObjectOptions{} - opts.SetRange(test.start, test.end) - args["opts"] = fmt.Sprintf("%+v", test) - obj, err := c.GetObject(ctx, bucketName, objectName, opts) - if err != nil { - logError(testName, function, args, startTime, "", "FGetObject with long timeout failed", err) - return - } - err = crcMatches(obj, want) - if err != nil { - logError(testName, function, args, startTime, "", fmt.Sprintf("GetObject offset %d -> %d", test.start, test.end), err) - return - } - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test get object ACLs with GetObjectACL with custom provided context -func testGetObjectACLContext() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObjectACL(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // skipping region functional tests for non s3 runs - if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { - ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() - return - } - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. 
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Add meta data to add a canned acl - metaData := map[string]string{ - "X-Amz-Acl": "public-read-write", - } - - _, err = c.PutObject(context.Background(), bucketName, - objectName, reader, int64(bufSize), - minio.PutObjectOptions{ - ContentType: "binary/octet-stream", - UserMetadata: metaData, - }) - - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - args["ctx"] = ctx - defer cancel() - - // Read the data back - objectInfo, getObjectACLErr := c.GetObjectACL(ctx, bucketName, objectName) - if getObjectACLErr != nil { - logError(testName, function, args, startTime, "", "GetObjectACL failed. 
", getObjectACLErr) - return - } - - s, ok := objectInfo.Metadata["X-Amz-Acl"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "public-read-write" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil) - return - } - - bufSize = dataFileMap["datafile-1-MB"] - var reader2 = getDataReader("datafile-1-MB") - defer reader2.Close() - // Save the data - objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Add meta data to add a canned acl - metaData = map[string]string{ - "X-Amz-Grant-Read": "id=fooread@minio.go", - "X-Amz-Grant-Write": "id=foowrite@minio.go", - } - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject failed", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second) - args["ctx"] = ctx - defer cancel() - - // Read the data back - objectInfo, getObjectACLErr = c.GetObjectACL(ctx, bucketName, objectName) - if getObjectACLErr == nil { - logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) - return - } - - if len(objectInfo.Metadata) != 3 { - logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil) - return - } - - s, ok = objectInfo.Metadata["X-Amz-Grant-Read"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find 
\"X-Amz-Grant-Read\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "fooread@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] - if !ok { - logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) - return - } - - if len(s) != 1 { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) - return - } - - if s[0] != "foowrite@minio.go" { - logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Test validates putObject with context to see if request cancellation is honored for V2. -func testPutObjectContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "PutObject(ctx, bucketName, objectName, reader, size, opts)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "size": "", - "opts": "", - } - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. 
- c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Make a new bucket. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - defer cleanupBucket(bucketName, c) - bufSize := dataFileMap["datatfile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - - objectName := fmt.Sprintf("test-file-%v", rand.Uint32()) - args["objectName"] = objectName - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - args["ctx"] = ctx - args["size"] = bufSize - defer cancel() - - _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject with short timeout failed", err) - return - } - - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - args["ctx"] = ctx - - defer cancel() - reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject with long timeout failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with GetObject with custom context -func testGetObjectContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "GetObject(ctx, bucketName, objectName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - } - // Seed random based on current time. 
- rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - cancel() - - r, err := c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject failed unexpectedly", err) - return - } - if _, err = r.Stat(); err == nil { - logError(testName, function, args, startTime, "", "GetObject should fail on short timeout", err) - return - } - r.Close() - - ctx, cancel = 
context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - r, err = c.GetObject(ctx, bucketName, objectName, minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "GetObject shouldn't fail on longer timeout", err) - return - } - - st, err := r.Stat() - if err != nil { - logError(testName, function, args, startTime, "", "object Stat call failed", err) - return - } - if st.Size != int64(bufSize) { - logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err) - return - } - if err := r.Close(); err != nil { - logError(testName, function, args, startTime, "", " object Close() call failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test get object with FGetObject with custom context -func testFGetObjectContextV2() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "FGetObject(ctx, bucketName, objectName,fileName)" - args := map[string]interface{}{ - "ctx": "", - "bucketName": "", - "objectName": "", - "fileName": "", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV2(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v2 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. 
- err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket call failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - bufSize := dataFileMap["datatfile-1-MB"] - var reader = getDataReader("datafile-1-MB") - defer reader.Close() - // Save the data - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - _, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) - if err != nil { - logError(testName, function, args, startTime, "", "PutObject call failed", err) - return - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond) - args["ctx"] = ctx - defer cancel() - - fileName := "tempfile-context" - args["fileName"] = fileName - - // Read the data back - err = c.FGetObject(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{}) - if err == nil { - logError(testName, function, args, startTime, "", "FGetObject should fail on short timeout", err) - return - } - ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour) - defer cancel() - - // Read the data back - err = c.FGetObject(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{}) - if err != nil { - logError(testName, function, args, startTime, "", "FGetObject call shouldn't fail on long timeout", err) - return - } - - if err = os.Remove(fileName + "-fcontext"); err != nil { - logError(testName, function, args, startTime, "", "Remove file failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() - -} - -// Test list object v1 and V2 -func testListObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "ListObjects(bucketName, objectPrefix, recursive, doneCh)" - args := 
map[string]interface{}{ - "bucketName": "", - "objectPrefix": "", - "recursive": "true", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. - c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - defer cleanupBucket(bucketName, c) - - testObjects := []struct { - name string - storageClass string - }{ - // Special characters - {"foo bar", "STANDARD"}, - {"foo-%", "STANDARD"}, - {"random-object-1", "STANDARD"}, - {"random-object-2", "REDUCED_REDUNDANCY"}, - } - - for i, object := range testObjects { - bufSize := dataFileMap["datafile-33-kB"] - var reader = getDataReader("datafile-33-kB") - defer reader.Close() - _, err = c.PutObject(context.Background(), bucketName, object.name, reader, int64(bufSize), - minio.PutObjectOptions{ContentType: "binary/octet-stream", StorageClass: object.storageClass}) - if err != nil { - logError(testName, function, args, startTime, "", fmt.Sprintf("PutObject %d call failed", i+1), err) - return - } - } - - testList := func(listFn func(context.Context, string, minio.ListObjectsOptions) <-chan minio.ObjectInfo, bucket string, opts minio.ListObjectsOptions) { - var 
objCursor int - - // check for object name and storage-class from listing object result - for objInfo := range listFn(context.Background(), bucket, opts) { - if objInfo.Err != nil { - logError(testName, function, args, startTime, "", "ListObjects failed unexpectedly", err) - return - } - if objInfo.Key != testObjects[objCursor].name { - logError(testName, function, args, startTime, "", "ListObjects does not return expected object name", err) - return - } - if objInfo.StorageClass != testObjects[objCursor].storageClass { - // Ignored as Gateways (Azure/GCS etc) wont return storage class - ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() - } - objCursor++ - } - - if objCursor != len(testObjects) { - logError(testName, function, args, startTime, "", "ListObjects returned unexpected number of items", errors.New("")) - return - } - } - - testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, UseV1: true}) - testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true}) - testList(c.ListObjects, bucketName, minio.ListObjectsOptions{Recursive: true, WithMetadata: true}) - - successLogger(testName, function, args, startTime).Info() -} - -// Test deleting multiple objects with object retention set in Governance mode -func testRemoveObjects() { - // initialize logging params - startTime := time.Now() - testName := getFuncName() - function := "RemoveObjects(bucketName, objectsCh, opts)" - args := map[string]interface{}{ - "bucketName": "", - "objectPrefix": "", - "recursive": "true", - } - // Seed random based on current time. - rand.Seed(time.Now().Unix()) - - // Instantiate new minio client object. 
- c, err := minio.New(os.Getenv(serverEndpoint), - &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Secure: mustParseBool(os.Getenv(enableHTTPS)), - }) - if err != nil { - logError(testName, function, args, startTime, "", "MinIO client v4 object creation failed", err) - return - } - - // Enable tracing, write to stderr. - // c.TraceOn(os.Stderr) - - // Set user agent. - c.SetAppInfo("MinIO-go-FunctionalTest", "0.1.0") - - // Generate a new random bucket name. - bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") - args["bucketName"] = bucketName - objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - args["objectName"] = objectName - - // Make a new bucket. - err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1", ObjectLocking: true}) - if err != nil { - logError(testName, function, args, startTime, "", "MakeBucket failed", err) - return - } - - bufSize := dataFileMap["datafile-129-MB"] - var reader = getDataReader("datafile-129-MB") - defer reader.Close() - - n, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - log.Fatalln(err) - } - log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.") - - // Replace with smaller... 
- bufSize = dataFileMap["datafile-10-kB"] - reader = getDataReader("datafile-10-kB") - defer reader.Close() - - n, err = c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{}) - if err != nil { - log.Fatalln(err) - } - log.Println("Uploaded", objectName, " of size: ", n, "to bucket: ", bucketName, "Successfully.") - - t := time.Date(2030, time.April, 25, 14, 0, 0, 0, time.UTC) - m := minio.RetentionMode(minio.Governance) - opts := minio.PutObjectRetentionOptions{ - GovernanceBypass: false, - RetainUntilDate: &t, - Mode: &m, - } - err = c.PutObjectRetention(context.Background(), bucketName, objectName, opts) - if err != nil { - log.Fatalln(err) - } - - objectsCh := make(chan minio.ObjectInfo) - // Send object names that are needed to be removed to objectsCh - go func() { - defer close(objectsCh) - // List all objects from a bucket-name with a matching prefix. - for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { - if object.Err != nil { - log.Fatalln(object.Err) - } - objectsCh <- object - } - }() - - for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh, minio.RemoveObjectsOptions{}) { - // Error is expected here because Retention is set on the object - // and RemoveObjects is called without Bypass Governance - if rErr.Err == nil { - logError(testName, function, args, startTime, "", "Expected error during deletion", nil) - return - } - } - - objectsCh1 := make(chan minio.ObjectInfo) - - // Send object names that are needed to be removed to objectsCh - go func() { - defer close(objectsCh1) - // List all objects from a bucket-name with a matching prefix. 
- for object := range c.ListObjects(context.Background(), bucketName, minio.ListObjectsOptions{UseV1: true, Recursive: true}) { - if object.Err != nil { - log.Fatalln(object.Err) - } - objectsCh1 <- object - } - }() - - opts1 := minio.RemoveObjectsOptions{ - GovernanceBypass: true, - } - - for rErr := range c.RemoveObjects(context.Background(), bucketName, objectsCh1, opts1) { - // Error is not expected here because Retention is set on the object - // and RemoveObjects is called with Bypass Governance - logError(testName, function, args, startTime, "", "Error detected during deletion", rErr.Err) - return - } - - // Delete all objects and buckets - if err = cleanupVersionedBucket(bucketName, c); err != nil { - logError(testName, function, args, startTime, "", "CleanupBucket failed", err) - return - } - - successLogger(testName, function, args, startTime).Info() -} - -// Convert string to bool and always return false if any error -func mustParseBool(str string) bool { - b, err := strconv.ParseBool(str) - if err != nil { - return false - } - return b -} - -func main() { - // Output to stdout instead of the default stderr - log.SetOutput(os.Stdout) - // create custom formatter - mintFormatter := mintJSONFormatter{} - // set custom formatter - log.SetFormatter(&mintFormatter) - // log Info or above -- success cases are Info level, failures are Fatal level - log.SetLevel(log.InfoLevel) - - tls := mustParseBool(os.Getenv(enableHTTPS)) - kms := mustParseBool(os.Getenv(enableKMS)) - if os.Getenv(enableKMS) == "" { - // Default to KMS tests. 
- kms = true - } - // execute tests - if isFullMode() { - testMakeBucketErrorV2() - testGetObjectClosedTwiceV2() - testFPutObjectV2() - testMakeBucketRegionsV2() - testGetObjectReadSeekFunctionalV2() - testGetObjectReadAtFunctionalV2() - testGetObjectRanges() - testCopyObjectV2() - testFunctionalV2() - testComposeObjectErrorCasesV2() - testCompose10KSourcesV2() - testUserMetadataCopyingV2() - testPutObject0ByteV2() - testPutObjectNoLengthV2() - testPutObjectsUnknownV2() - testGetObjectContextV2() - testFPutObjectContextV2() - testFGetObjectContextV2() - testPutObjectContextV2() - testPutObjectWithVersioning() - testMakeBucketError() - testMakeBucketRegions() - testPutObjectWithMetadata() - testPutObjectReadAt() - testPutObjectStreaming() - testGetObjectSeekEnd() - testGetObjectClosedTwice() - testRemoveMultipleObjects() - testFPutObjectMultipart() - testFPutObject() - testGetObjectReadSeekFunctional() - testGetObjectReadAtFunctional() - testGetObjectReadAtWhenEOFWasReached() - testPresignedPostPolicy() - testCopyObject() - testComposeObjectErrorCases() - testCompose10KSources() - testUserMetadataCopying() - testBucketNotification() - testFunctional() - testGetObjectModified() - testPutObjectUploadSeekedObject() - testGetObjectContext() - testFPutObjectContext() - testFGetObjectContext() - testGetObjectACLContext() - testPutObjectContext() - testStorageClassMetadataPutObject() - testStorageClassInvalidMetadataPutObject() - testStorageClassMetadataCopyObject() - testPutObjectWithContentLanguage() - testListObjects() - testRemoveObjects() - testListObjectVersions() - testStatObjectWithVersioning() - testGetObjectWithVersioning() - testCopyObjectWithVersioning() - testConcurrentCopyObjectWithVersioning() - testComposeObjectWithVersioning() - testRemoveObjectWithVersioning() - testRemoveObjectsWithVersioning() - testObjectTaggingWithVersioning() - - // SSE-C tests will only work over TLS connection. 
- if tls { - testSSECEncryptionPutGet() - testSSECEncryptionFPut() - testSSECEncryptedGetObjectReadAtFunctional() - testSSECEncryptedGetObjectReadSeekFunctional() - testEncryptedCopyObjectV2() - testEncryptedSSECToSSECCopyObject() - testEncryptedSSECToUnencryptedCopyObject() - testUnencryptedToSSECCopyObject() - testUnencryptedToUnencryptedCopyObject() - testEncryptedEmptyObject() - testDecryptedCopyObject() - testSSECEncryptedToSSECCopyObjectPart() - testSSECMultipartEncryptedToSSECCopyObjectPart() - testSSECEncryptedToUnencryptedCopyPart() - testUnencryptedToSSECCopyObjectPart() - testUnencryptedToUnencryptedCopyPart() - testEncryptedSSECToSSES3CopyObject() - testEncryptedSSES3ToSSECCopyObject() - testSSECEncryptedToSSES3CopyObjectPart() - testSSES3EncryptedToSSECCopyObjectPart() - } - - // KMS tests - if kms { - testSSES3EncryptionPutGet() - testSSES3EncryptionFPut() - testSSES3EncryptedGetObjectReadAtFunctional() - testSSES3EncryptedGetObjectReadSeekFunctional() - testEncryptedSSES3ToSSES3CopyObject() - testEncryptedSSES3ToUnencryptedCopyObject() - testUnencryptedToSSES3CopyObject() - testUnencryptedToSSES3CopyObjectPart() - testSSES3EncryptedToUnencryptedCopyPart() - testSSES3EncryptedToSSES3CopyObjectPart() - } - } else { - testFunctional() - testFunctionalV2() - } -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/go.mod b/mantle/vendor/github.com/minio/minio-go/v7/go.mod deleted file mode 100644 index 92590e97..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/go.mod +++ /dev/null @@ -1,27 +0,0 @@ -module github.com/minio/minio-go/v7 - -go 1.14 - -require ( - github.com/dustin/go-humanize v1.0.0 - github.com/google/uuid v1.1.1 - github.com/json-iterator/go v1.1.10 - github.com/klauspost/cpuid v1.3.1 // indirect - github.com/kr/pretty v0.1.0 // indirect - github.com/minio/md5-simd v1.1.0 - github.com/minio/sha256-simd v0.1.1 - github.com/mitchellh/go-homedir v1.1.0 - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // 
indirect - github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/rs/xid v1.2.1 - github.com/sirupsen/logrus v1.8.1 - github.com/smartystreets/goconvey v1.6.4 // indirect - github.com/stretchr/testify v1.4.0 // indirect - golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f - golang.org/x/net v0.0.0-20200707034311-ab3426394381 - golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect - golang.org/x/text v0.3.3 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/ini.v1 v1.57.0 - gopkg.in/yaml.v2 v2.2.8 // indirect -) diff --git a/mantle/vendor/github.com/minio/minio-go/v7/go.sum b/mantle/vendor/github.com/minio/minio-go/v7/go.sum deleted file mode 100644 index 0e8248ff..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/go.sum +++ /dev/null @@ -1,77 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jtolds/gls v4.20.0+incompatible 
h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= -github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= -github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f h1:aZp0e2vLN4MToVqnjNEYEtrEA8RH8U8FN1CU7JgqsPU= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= -gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/mantle/vendor/github.com/minio/minio-go/v7/hook-reader.go b/mantle/vendor/github.com/minio/minio-go/v7/hook-reader.go deleted file mode 100644 index f251c1e9..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/hook-reader.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "fmt" - "io" -) - -// hookReader hooks additional reader in the source stream. It is -// useful for making progress bars. Second reader is appropriately -// notified about the exact number of bytes read from the primary -// source on each Read operation. -type hookReader struct { - source io.Reader - hook io.Reader -} - -// Seek implements io.Seeker. Seeks source first, and if necessary -// seeks hook if Seek method is appropriately found. -func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { - // Verify for source has embedded Seeker, use it. - sourceSeeker, ok := hr.source.(io.Seeker) - if ok { - n, err = sourceSeeker.Seek(offset, whence) - if err != nil { - return 0, err - } - } - - // Verify if hook has embedded Seeker, use it. 
- hookSeeker, ok := hr.hook.(io.Seeker) - if ok { - var m int64 - m, err = hookSeeker.Seek(offset, whence) - if err != nil { - return 0, err - } - if n != m { - return 0, fmt.Errorf("hook seeker seeked %d bytes, expected source %d bytes", m, n) - } - } - return n, nil -} - -// Read implements io.Reader. Always reads from the source, the return -// value 'n' number of bytes are reported through the hook. Returns -// error for all non io.EOF conditions. -func (hr *hookReader) Read(b []byte) (n int, err error) { - n, err = hr.source.Read(b) - if err != nil && err != io.EOF { - return n, err - } - // Progress the hook with the total read bytes from the source. - if _, herr := hr.hook.Read(b[:n]); herr != nil { - if herr != io.EOF { - return n, herr - } - } - return n, err -} - -// newHook returns a io.ReadSeeker which implements hookReader that -// reports the data read from the source to the hook. -func newHook(source, hook io.Reader) io.Reader { - if hook == nil { - return source - } - return &hookReader{source, hook} -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go deleted file mode 100644 index 3b1b547b..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/assume_role.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/hex" - "encoding/xml" - "errors" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v7/pkg/signer" - sha256 "github.com/minio/sha256-simd" -) - -// AssumeRoleResponse contains the result of successful AssumeRole request. -type AssumeRoleResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"` - - Result AssumeRoleResult `xml:"AssumeRoleResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// AssumeRoleResult - Contains the response to a successful AssumeRole -// request, including temporary credentials that can be used to make -// MinIO API requests. -type AssumeRoleResult struct { - // The identifiers for the temporary security credentials that the operation - // returns. - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - - // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. - // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - - // A percentage value that indicates the size of the policy in packed form. 
- // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. - PackedPolicySize int `xml:",omitempty"` -} - -// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if -// those credentials are expired. -type STSAssumeRole struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // STS endpoint to fetch STS credentials. - STSEndpoint string - - // various options for this request. - Options STSAssumeRoleOptions -} - -// STSAssumeRoleOptions collection of various input options -// to obtain AssumeRole credentials. -type STSAssumeRoleOptions struct { - // Mandatory inputs. - AccessKey string - SecretKey string - - Location string // Optional commonly needed with AWS STS. - DurationSeconds int // Optional defaults to 1 hour. - - // Optional only valid if using with AWS STS - RoleARN string - RoleSessionName string -} - -// NewSTSAssumeRole returns a pointer to a new -// Credentials object wrapping the STSAssumeRole. -func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if opts.AccessKey == "" || opts.SecretKey == "" { - return nil, errors.New("AssumeRole credentials access/secretkey is mandatory") - } - return New(&STSAssumeRole{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - STSEndpoint: stsEndpoint, - Options: opts, - }), nil -} - -const defaultDurationSeconds = 3600 - -// closeResponse close non nil response with any response Body. -// convenient wrapper to drain any remaining data on response body. -// -// Subsequently this allows golang http RoundTripper -// to re-use the same connection for future requests. -func closeResponse(resp *http.Response) { - // Callers should close resp.Body when done reading from it. 
- // If resp.Body is not closed, the Client's underlying RoundTripper - // (typically Transport) may not be able to re-use a persistent TCP - // connection to the server for a subsequent "keep-alive" request. - if resp != nil && resp.Body != nil { - // Drain any remaining Body and then close the connection. - // Without this closing connection would disallow re-using - // the same connection for future uses. - // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } -} - -func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) { - v := url.Values{} - v.Set("Action", "AssumeRole") - v.Set("Version", STSVersion) - if opts.RoleARN != "" { - v.Set("RoleArn", opts.RoleARN) - } - if opts.RoleSessionName != "" { - v.Set("RoleSessionName", opts.RoleSessionName) - } - if opts.DurationSeconds > defaultDurationSeconds { - v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds)) - } else { - v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds)) - } - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleResponse{}, err - } - u.Path = "/" - - postBody := strings.NewReader(v.Encode()) - hash := sha256.New() - if _, err = io.Copy(hash, postBody); err != nil { - return AssumeRoleResponse{}, err - } - postBody.Seek(0, 0) - - req, err := http.NewRequest(http.MethodPost, u.String(), postBody) - if err != nil { - return AssumeRoleResponse{}, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil))) - req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location) - - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleResponse{}, err - } - defer closeResponse(resp) - if resp.StatusCode != http.StatusOK { - return AssumeRoleResponse{}, errors.New(resp.Status) - } - - a := AssumeRoleResponse{} - if err = 
xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleResponse{}, err - } - return a, nil -} - -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSAssumeRole) Retrieve() (Value, error) { - a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go deleted file mode 100644 index 6dc8e9d0..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/chain.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -// A Chain will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The Chain provides a way of chaining multiple providers together -// which will pick the first available using priority order of the -// Providers in the list. 
-// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the no credentials value. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again after IsExpired() is true. -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvAWSS3{}, -// &credentials.EnvMinio{}, -// }) -// -// // Usage of ChainCredentials. -// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") -// if err != nil { -// log.Fatalln(err) -// } -// -type Chain struct { - Providers []Provider - curr Provider -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return New(&Chain{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value, returns no credentials(anonymous) -// if no credentials provider returned any value. -// -// If a provider is found with credentials, it will be cached and any calls -// to IsExpired() will return the expired state of the cached provider. -func (c *Chain) Retrieve() (Value, error) { - for _, p := range c.Providers { - creds, _ := p.Retrieve() - // Always prioritize non-anonymous providers, if any. - if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { - continue - } - c.curr = p - return creds, nil - } - // At this point we have exhausted all the providers and - // are left without any credentials return anonymous. - return Value{ - SignerType: SignatureAnonymous, - }, nil -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. 
-func (c *Chain) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample deleted file mode 100644 index d793c9e0..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/config.json.sample +++ /dev/null @@ -1,17 +0,0 @@ -{ - "version": "8", - "hosts": { - "play": { - "url": "https://play.min.io", - "accessKey": "Q3AM3UQ867SPQQA43P2F", - "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", - "api": "S3v2" - }, - "s3": { - "url": "https://s3.amazonaws.com", - "accessKey": "accessKey", - "secretKey": "secret", - "api": "S3v4" - } - } -} \ No newline at end of file diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go deleted file mode 100644 index 62d1701e..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "sync" - "time" -) - -// STSVersion sts version string -const STSVersion = "2011-06-15" - -// A Value is the AWS credentials value for individual credential fields. 
-type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Signature Type. - SignerType SignatureType -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type IAMCredentialProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. 
-func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now - } - return e.expiration.Before(e.CurrentTime()) -} - -// Credentials - A container for synchronous safe retrieval of credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - sync.Mutex - - creds Value - forceRefresh bool - provider Provider -} - -// New returns a pointer to a new Credentials with the provider set. -func New(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - if c == nil { - return Value{}, nil - } - - c.Lock() - defer c.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). 
-// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.Lock() - defer c.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be refreshed. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.Lock() - defer c.Unlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample deleted file mode 100644 index 7fc91d9d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/credentials.sample +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go deleted file mode 100644 index 0c94477b..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package credentials provides credential retrieval and management -// for S3 compatible object storage. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. 
-// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go deleted file mode 100644 index b6e60d0e..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_aws.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "os" - -// A EnvAWS retrieves credentials from the environment variables of the -// running process. EnvAWSironment credentials never expire. -// -// EnvAWSironment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. -// * Secret Token: AWS_SESSION_TOKEN. -type EnvAWS struct { - retrieved bool -} - -// NewEnvAWS returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvAWS() *Credentials { - return New(&EnvAWS{}) -} - -// Retrieve retrieves the keys from the environment. 
-func (e *EnvAWS) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - signerType := SignatureV4 - if id == "" || secret == "" { - signerType = SignatureAnonymous - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - SignerType: signerType, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvAWS) IsExpired() bool { - return !e.retrieved -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go deleted file mode 100644 index 5bfeab14..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/env_minio.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "os" - -// A EnvMinio retrieves credentials from the environment variables of the -// running process. EnvMinioironment credentials never expire. -// -// Environment variables used: -// -// * Access Key ID: MINIO_ACCESS_KEY. -// * Secret Access Key: MINIO_SECRET_KEY. -// * Access Key ID: MINIO_ROOT_USER. 
-// * Secret Access Key: MINIO_ROOT_PASSWORD. -type EnvMinio struct { - retrieved bool -} - -// NewEnvMinio returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvMinio() *Credentials { - return New(&EnvMinio{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvMinio) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("MINIO_ROOT_USER") - secret := os.Getenv("MINIO_ROOT_PASSWORD") - - signerType := SignatureV4 - if id == "" || secret == "" { - id = os.Getenv("MINIO_ACCESS_KEY") - secret = os.Getenv("MINIO_SECRET_KEY") - if id == "" || secret == "" { - signerType = SignatureAnonymous - } - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SignerType: signerType, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvMinio) IsExpired() bool { - return !e.retrieved -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go deleted file mode 100644 index ccc8251f..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_aws_credentials.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package credentials - -import ( - "os" - "path/filepath" - - homedir "github.com/mitchellh/go-homedir" - ini "gopkg.in/ini.v1" -) - -// A FileAWSCredentials retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type FileAWSCredentials struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewFileAWSCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewFileAWSCredentials(filename string, profile string) *Credentials { - return New(&FileAWSCredentials{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *FileAWSCredentials) Retrieve() (Value, error) { - if p.Filename == "" { - p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") - if p.Filename == "" { - homeDir, err := homedir.Dir() - if err != nil { - return Value{}, err - } - p.Filename = filepath.Join(homeDir, ".aws", "credentials") - } - } - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - if p.Profile == "" { - p.Profile = "default" - } - } - - p.retrieved = false - - iniProfile, err := loadProfile(p.Filename, p.Profile) - if err != nil { - return Value{}, err - } - - // Default to empty string if not found. 
- id := iniProfile.Key("aws_access_key_id") - // Default to empty string if not found. - secret := iniProfile.Key("aws_secret_access_key") - // Default to empty string if not found. - token := iniProfile.Key("aws_session_token") - - p.retrieved = true - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - SignerType: SignatureV4, - }, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *FileAWSCredentials) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. -func loadProfile(filename, profile string) (*ini.Section, error) { - config, err := ini.Load(filename) - if err != nil { - return nil, err - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return nil, err - } - return iniProfile, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go deleted file mode 100644 index ca6db005..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/file_minio_client.go +++ /dev/null @@ -1,135 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - - jsoniter "github.com/json-iterator/go" - homedir "github.com/mitchellh/go-homedir" -) - -// A FileMinioClient retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Configuration file example: $HOME/.mc/config.json -type FileMinioClient struct { - // Path to the shared credentials file. - // - // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.mc/config.json" - // Windows: "%USERALIAS%\mc\config.json" - Filename string - - // MinIO Alias to extract credentials from the shared credentials file. If empty - // will default to environment variable "MINIO_ALIAS" or "default" if - // environment variable is also not set. - Alias string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewFileMinioClient returns a pointer to a new Credentials object -// wrapping the Alias file provider. -func NewFileMinioClient(filename string, alias string) *Credentials { - return New(&FileMinioClient{ - Filename: filename, - Alias: alias, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. 
-func (p *FileMinioClient) Retrieve() (Value, error) { - if p.Filename == "" { - if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { - p.Filename = value - } else { - homeDir, err := homedir.Dir() - if err != nil { - return Value{}, err - } - p.Filename = filepath.Join(homeDir, ".mc", "config.json") - if runtime.GOOS == "windows" { - p.Filename = filepath.Join(homeDir, "mc", "config.json") - } - } - } - - if p.Alias == "" { - p.Alias = os.Getenv("MINIO_ALIAS") - if p.Alias == "" { - p.Alias = "s3" - } - } - - p.retrieved = false - - hostCfg, err := loadAlias(p.Filename, p.Alias) - if err != nil { - return Value{}, err - } - - p.retrieved = true - return Value{ - AccessKeyID: hostCfg.AccessKey, - SecretAccessKey: hostCfg.SecretKey, - SignerType: parseSignatureType(hostCfg.API), - }, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *FileMinioClient) IsExpired() bool { - return !p.retrieved -} - -// hostConfig configuration of a host. -type hostConfig struct { - URL string `json:"url"` - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - API string `json:"api"` -} - -// config config version. -type config struct { - Version string `json:"version"` - Hosts map[string]hostConfig `json:"hosts"` -} - -// loadAliass loads from the file pointed to by shared credentials filename for alias. -// The credentials retrieved from the alias will be returned or error. Error will be -// returned if it fails to read from the file. 
-func loadAlias(filename, alias string) (hostConfig, error) { - cfg := &config{} - var json = jsoniter.ConfigCompatibleWithStandardLibrary - - configBytes, err := ioutil.ReadFile(filename) - if err != nil { - return hostConfig{}, err - } - if err = json.Unmarshal(configBytes, cfg); err != nil { - return hostConfig{}, err - } - return cfg.Hosts[alias], nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go deleted file mode 100644 index b532bcb6..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ /dev/null @@ -1,367 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "bufio" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path" - "strings" - "time" - - jsoniter "github.com/json-iterator/go" -) - -// DefaultExpiryWindow - Default expiry window. -// ExpiryWindow will allow the credentials to trigger refreshing -// prior to the credentials actually expiring. This is beneficial -// so race conditions with expiring credentials do not cause -// request to fail unexpectedly due to ExpiredTokenException exceptions. 
-const DefaultExpiryWindow = time.Second * 10 // 10 secs - -// A IAM retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -type IAM struct { - Expiry - - // Required http Client to use when connecting to IAM metadata service. - Client *http.Client - - // Custom endpoint to fetch IAM role credentials. - Endpoint string -} - -// IAM Roles for Amazon EC2 -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -const ( - defaultIAMRoleEndpoint = "http://169.254.169.254" - defaultECSRoleEndpoint = "http://169.254.170.2" - defaultSTSRoleEndpoint = "https://sts.amazonaws.com" - defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/" - tokenRequestTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds" - tokenPath = "/latest/api/token" - tokenTTL = "21600" - tokenRequestHeader = "X-aws-ec2-metadata-token" -) - -// NewIAM returns a pointer to a new Credentials object wrapping the IAM. -func NewIAM(endpoint string) *Credentials { - return New(&IAM{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - Endpoint: endpoint, - }) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired -func (m *IAM) Retrieve() (Value, error) { - token := os.Getenv("AWS_CONTAINER_AUTHORIZATION_TOKEN") - var roleCreds ec2RoleCredRespBody - var err error - - endpoint := m.Endpoint - switch { - case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0: - if len(endpoint) == 0 { - if len(os.Getenv("AWS_REGION")) > 0 { - if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") { - endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com.cn" - } else { - endpoint = "https://sts." 
+ os.Getenv("AWS_REGION") + ".amazonaws.com" - } - } else { - endpoint = defaultSTSRoleEndpoint - } - } - - creds := &STSWebIdentity{ - Client: m.Client, - STSEndpoint: endpoint, - GetWebIDTokenExpiry: func() (*WebIdentityToken, error) { - token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) - if err != nil { - return nil, err - } - - return &WebIdentityToken{Token: string(token)}, nil - }, - roleARN: os.Getenv("AWS_ROLE_ARN"), - roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"), - } - - stsWebIdentityCreds, err := creds.Retrieve() - if err == nil { - m.SetExpiration(creds.Expiration(), DefaultExpiryWindow) - } - return stsWebIdentityCreds, err - - case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0: - if len(endpoint) == 0 { - endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint, - os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) - } - - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) - - case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0: - if len(endpoint) == 0 { - endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI") - var ok bool - if ok, err = isLoopback(endpoint); !ok { - if err == nil { - err = fmt.Errorf("uri host is not a loopback address: %s", endpoint) - } - break - } - } - - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint, token) - - default: - roleCreds, err = getCredentials(m.Client, endpoint) - } - - if err != nil { - return Value{}, err - } - // Expiry window is set to 10secs. - m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - SignerType: SignatureV4, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. 
-type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string - - // Unused params. - LastUpdated time.Time - Type string -} - -// Get the final IAM role URL where the request will -// be sent to fetch the rolling access credentials. -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func getIAMRoleURL(endpoint string) (*url.URL, error) { - if endpoint == "" { - endpoint = defaultIAMRoleEndpoint - } - - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - u.Path = defaultIAMSecurityCredsPath - return u, nil -} - -// listRoleNames lists of credential role names associated -// with the current EC2 service. If there are no credentials, -// or there is an error making or receiving the request. -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func listRoleNames(client *http.Client, u *url.URL, token string) ([]string, error) { - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err - } - if token != "" { - req.Header.Add(tokenRequestHeader, token) - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, errors.New(resp.Status) - } - - credsList := []string{} - s := bufio.NewScanner(resp.Body) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, err - } - - return credsList, nil -} - -func getEcsTaskCredentials(client *http.Client, endpoint string, token string) (ec2RoleCredRespBody, error) { - req, err := http.NewRequest(http.MethodGet, endpoint, nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - if token != "" { - req.Header.Set("Authorization", token) - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } 
- defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - return respCreds, nil -} - -func fetchIMDSToken(client *http.Client, endpoint string) (string, error) { - req, err := http.NewRequest(http.MethodPut, endpoint+tokenPath, nil) - if err != nil { - return "", err - } - req.Header.Add(tokenRequestTTLHeader, tokenTTL) - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - if resp.StatusCode != http.StatusOK { - return "", errors.New(resp.Status) - } - return string(data), nil -} - -// getCredentials - obtains the credentials from the IAM role name associated with -// the current EC2 service. -// -// If the credentials cannot be found, or there is an error -// reading the response an error will be returned. -func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { - // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html - token, _ := fetchIMDSToken(client, endpoint) - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - u, err := getIAMRoleURL(endpoint) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - roleNames, err := listRoleNames(client, u, token) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - if len(roleNames) == 0 { - return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // - An instance profile can contain only one IAM role. This limit cannot be increased. 
- roleName := roleNames[0] - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // The following command retrieves the security credentials for an - // IAM role named `s3access`. - // - // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access - // - u.Path = path.Join(u.Path, roleName) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - if token != "" { - req.Header.Add(tokenRequestHeader, token) - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. 
- return ec2RoleCredRespBody{}, errors.New(respCreds.Message) - } - - return respCreds, nil -} - -// isLoopback identifies if a uri's host is on a loopback address -func isLoopback(uri string) (bool, error) { - u, err := url.Parse(uri) - if err != nil { - return false, err - } - - host := u.Hostname() - if len(host) == 0 { - return false, fmt.Errorf("can't parse host from uri: %s", uri) - } - - ips, err := net.LookupHost(host) - if err != nil { - return false, err - } - for _, ip := range ips { - if !net.ParseIP(ip).IsLoopback() { - return false, nil - } - } - - return true, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go deleted file mode 100644 index b7943330..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/signature-type.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "strings" - -// SignatureType is type of Authorization requested for a given HTTP request. -type SignatureType int - -// Different types of supported signatures - default is SignatureV4 or SignatureDefault. -const ( - // SignatureDefault is always set to v4. 
- SignatureDefault SignatureType = iota - SignatureV4 - SignatureV2 - SignatureV4Streaming - SignatureAnonymous // Anonymous signature signifies, no signature. -) - -// IsV2 - is signature SignatureV2? -func (s SignatureType) IsV2() bool { - return s == SignatureV2 -} - -// IsV4 - is signature SignatureV4? -func (s SignatureType) IsV4() bool { - return s == SignatureV4 || s == SignatureDefault -} - -// IsStreamingV4 - is signature SignatureV4Streaming? -func (s SignatureType) IsStreamingV4() bool { - return s == SignatureV4Streaming -} - -// IsAnonymous - is signature empty? -func (s SignatureType) IsAnonymous() bool { - return s == SignatureAnonymous -} - -// Stringer humanized version of signature type, -// strings returned here are case insensitive. -func (s SignatureType) String() string { - if s.IsV2() { - return "S3v2" - } else if s.IsV4() { - return "S3v4" - } else if s.IsStreamingV4() { - return "S3v4Streaming" - } - return "Anonymous" -} - -func parseSignatureType(str string) SignatureType { - if strings.EqualFold(str, "S3v4") { - return SignatureV4 - } else if strings.EqualFold(str, "S3v2") { - return SignatureV2 - } else if strings.EqualFold(str, "S3v4Streaming") { - return SignatureV4Streaming - } - return SignatureAnonymous -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go deleted file mode 100644 index 7dde00b0..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/static.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -// A Static is a set of credentials which are set programmatically, -// and will never expire. -type Static struct { - Value -} - -// NewStaticV2 returns a pointer to a new Credentials object -// wrapping a static credentials value provider, signature is -// set to v2. If access and secret are not specified then -// regardless of signature type set it Value will return -// as anonymous. -func NewStaticV2(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV2) -} - -// NewStaticV4 is similar to NewStaticV2 with similar considerations. -func NewStaticV4(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV4) -} - -// NewStatic returns a pointer to a new Credentials object -// wrapping a static credentials value provider. -func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { - return New(&Static{ - Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - SignerType: signerType, - }, - }) -} - -// Retrieve returns the static credentials. -func (s *Static) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - // Anonymous is not an error - return Value{SignerType: SignatureAnonymous}, nil - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For Static, the credentials never expired. 
-func (s *Static) IsExpired() bool { - return false -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go deleted file mode 100644 index b79f920f..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_client_grants.go +++ /dev/null @@ -1,162 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "time" -) - -// AssumedRoleUser - The identifiers for the temporary security credentials that -// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser -type AssumedRoleUser struct { - Arn string - AssumedRoleID string `xml:"AssumeRoleId"` -} - -// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request. 
-type AssumeRoleWithClientGrantsResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` - Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants -// request, including temporary credentials that can be used to make MinIO API requests. -type ClientGrantsResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromClientGrantsToken string `xml:",omitempty"` -} - -// ClientGrantsToken - client grants token with expiry. -type ClientGrantsToken struct { - Token string - Expiry int -} - -// A STSClientGrants retrieves credentials from MinIO service, and keeps track if -// those credentials are expired. -type STSClientGrants struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // MinIO endpoint to fetch STS credentials. - STSEndpoint string - - // getClientGrantsTokenExpiry function to retrieve tokens - // from IDP This function should return two values one is - // accessToken which is a self contained access token (JWT) - // and second return value is the expiry associated with - // this token. This is a customer provided function and - // is mandatory. 
- GetClientGrantsTokenExpiry func() (*ClientGrantsToken, error) -} - -// NewSTSClientGrants returns a pointer to a new -// Credentials object wrapping the STSClientGrants. -func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getClientGrantsTokenExpiry == nil { - return nil, errors.New("Client grants access token and expiry retrieval function should be defined") - } - return New(&STSClientGrants{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - STSEndpoint: stsEndpoint, - GetClientGrantsTokenExpiry: getClientGrantsTokenExpiry, - }), nil -} - -func getClientGrantsCredentials(clnt *http.Client, endpoint string, - getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) { - - accessToken, err := getClientGrantsTokenExpiry() - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithClientGrants") - v.Set("Token", accessToken.Token) - v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry)) - v.Set("Version", STSVersion) - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - u.RawQuery = v.Encode() - - req, err := http.NewRequest(http.MethodPost, u.String(), nil) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status) - } - - a := AssumeRoleWithClientGrantsResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - return a, nil -} - -// Retrieve retrieves credentials from the MinIO 
service. -// Error will be returned if the request fails. -func (m *STSClientGrants) Retrieve() (Value, error) { - a, err := getClientGrantsCredentials(m.Client, m.STSEndpoint, m.GetClientGrantsTokenExpiry) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go deleted file mode 100644 index bcb3c36a..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_ldap_identity.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package credentials - -import ( - "encoding/xml" - "errors" - "net/http" - "net/url" - "time" -) - -// AssumeRoleWithLDAPResponse contains the result of successful -// AssumeRoleWithLDAPIdentity request -type AssumeRoleWithLDAPResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"` - Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// LDAPIdentityResult - contains credentials for a successful -// AssumeRoleWithLDAPIdentity request. -type LDAPIdentityResult struct { - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - - SubjectFromToken string `xml:",omitempty"` -} - -// LDAPIdentity retrieves credentials from MinIO -type LDAPIdentity struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // Exported STS endpoint to fetch STS credentials. - STSEndpoint string - - // LDAP username/password used to fetch LDAP STS credentials. - LDAPUsername, LDAPPassword string -} - -// NewLDAPIdentity returns new credentials object that uses LDAP -// Identity. -func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) { - return New(&LDAPIdentity{ - Client: &http.Client{Transport: http.DefaultTransport}, - STSEndpoint: stsEndpoint, - LDAPUsername: ldapUsername, - LDAPPassword: ldapPassword, - }), nil -} - -// Retrieve gets the credential by calling the MinIO STS API for -// LDAP on the configured stsEndpoint. 
-func (k *LDAPIdentity) Retrieve() (value Value, err error) { - u, kerr := url.Parse(k.STSEndpoint) - if kerr != nil { - err = kerr - return - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithLDAPIdentity") - v.Set("Version", STSVersion) - v.Set("LDAPUsername", k.LDAPUsername) - v.Set("LDAPPassword", k.LDAPPassword) - - u.RawQuery = v.Encode() - - req, kerr := http.NewRequest(http.MethodPost, u.String(), nil) - if kerr != nil { - err = kerr - return - } - - resp, kerr := k.Client.Do(req) - if kerr != nil { - err = kerr - return - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - err = errors.New(resp.Status) - return - } - - r := AssumeRoleWithLDAPResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil { - return - } - - cr := r.Result.Credentials - k.SetExpiration(cr.Expiration, DefaultExpiryWindow) - return Value{ - AccessKeyID: cr.AccessKey, - SecretAccessKey: cr.SecretKey, - SessionToken: cr.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go deleted file mode 100644 index 161ffd36..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/credentials/sts_web_identity.go +++ /dev/null @@ -1,181 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "strconv" - "time" -) - -// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request. -type AssumeRoleWithWebIdentityResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` - Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity -// request, including temporary credentials that can be used to make MinIO API requests. -type WebIdentityResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromWebIdentityToken string `xml:",omitempty"` -} - -// WebIdentityToken - web identity token with expiry. -type WebIdentityToken struct { - Token string - Expiry int -} - -// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if -// those credentials are expired. -type STSWebIdentity struct { - Expiry - - // Required http Client to use when connecting to MinIO STS service. - Client *http.Client - - // Exported STS endpoint to fetch STS credentials. - STSEndpoint string - - // Exported GetWebIDTokenExpiry function which returns ID - // tokens from IDP. 
This function should return two values - // one is ID token which is a self contained ID token (JWT) - // and second return value is the expiry associated with - // this token. - // This is a customer provided function and is mandatory. - GetWebIDTokenExpiry func() (*WebIdentityToken, error) - - // roleARN is the Amazon Resource Name (ARN) of the role that the caller is - // assuming. - roleARN string - - // roleSessionName is the identifier for the assumed role session. - roleSessionName string -} - -// NewSTSWebIdentity returns a pointer to a new -// Credentials object wrapping the STSWebIdentity. -func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getWebIDTokenExpiry == nil { - return nil, errors.New("Web ID token and expiry retrieval function should be defined") - } - return New(&STSWebIdentity{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - STSEndpoint: stsEndpoint, - GetWebIDTokenExpiry: getWebIDTokenExpiry, - }), nil -} - -func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string, - getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { - idToken, err := getWebIDTokenExpiry() - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithWebIdentity") - if len(roleARN) > 0 { - v.Set("RoleArn", roleARN) - - if len(roleSessionName) == 0 { - roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10) - } - v.Set("RoleSessionName", roleSessionName) - } - v.Set("WebIdentityToken", idToken.Token) - if idToken.Expiry > 0 { - v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry)) - } - v.Set("Version", STSVersion) - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - u.RawQuery = 
v.Encode() - - req, err := http.NewRequest(http.MethodPost, u.String(), nil) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status) - } - - a := AssumeRoleWithWebIdentityResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - return a, nil -} - -// Retrieve retrieves credentials from the MinIO service. -// Error will be returned if the request fails. -func (m *STSWebIdentity) Retrieve() (Value, error) { - a, err := getWebIdentityCredentials(m.Client, m.STSEndpoint, m.roleARN, m.roleSessionName, m.GetWebIDTokenExpiry) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} - -// Expiration returns the expiration time of the credentials -func (m *STSWebIdentity) Expiration() time.Time { - return m.expiration -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go deleted file mode 100644 index ce7d2153..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/encrypt/server-side.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -import ( - "crypto/md5" - "encoding/base64" - "errors" - "net/http" - - jsoniter "github.com/json-iterator/go" - "golang.org/x/crypto/argon2" -) - -const ( - // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. - sseGenericHeader = "X-Amz-Server-Side-Encryption" - - // sseKmsKeyID is the AWS SSE-KMS key id. - sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" - // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. - sseEncryptionContext = sseGenericHeader + "-Context" - - // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. - sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" - // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. - sseCustomerKey = sseGenericHeader + "-Customer-Key" - // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. - sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" - - // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. - sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. - sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. - sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" -) - -// PBKDF creates a SSE-C key from the provided password and salt. 
-// PBKDF is a password-based key derivation function -// which can be used to derive a high-entropy cryptographic -// key from a low-entropy password and a salt. -type PBKDF func(password, salt []byte) ServerSide - -// DefaultPBKDF is the default PBKDF. It uses Argon2id with the -// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). -var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { - sse := ssec{} - copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) - return sse -} - -// Type is the server-side-encryption method. It represents one of -// the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption -type Type string - -const ( - // SSEC represents server-side-encryption with customer provided keys - SSEC Type = "SSE-C" - // KMS represents server-side-encryption with managed keys - KMS Type = "KMS" - // S3 represents server-side-encryption using S3 storage encryption - S3 Type = "S3" -) - -// ServerSide is a form of S3 server-side-encryption. -type ServerSide interface { - // Type returns the server-side-encryption method. - Type() Type - - // Marshal adds encryption headers to the provided HTTP headers. - // It marks an HTTP request as server-side-encryption request - // and inserts the required data into the headers. - Marshal(h http.Header) -} - -// NewSSE returns a server-side-encryption using S3 storage encryption. -// Using SSE-S3 the server will encrypt the object with server-managed keys. -func NewSSE() ServerSide { return s3{} } - -// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. 
-func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { - if context == nil { - return kms{key: keyID, hasContext: false}, nil - } - var json = jsoniter.ConfigCompatibleWithStandardLibrary - serializedContext, err := json.Marshal(context) - if err != nil { - return nil, err - } - return kms{key: keyID, context: serializedContext, hasContext: true}, nil -} - -// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. -// The key must be 32 bytes long. -func NewSSEC(key []byte) (ServerSide, error) { - if len(key) != 32 { - return nil, errors.New("encrypt: SSE-C key must be 256 bit long") - } - sse := ssec{} - copy(sse[:], key) - return sse, nil -} - -// SSE transforms a SSE-C copy encryption into a SSE-C encryption. -// It is the inverse of SSECopy(...). -// -// If the provided sse is no SSE-C copy encryption SSE returns -// sse unmodified. -func SSE(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssecCopy); ok { - return ssec(sse) - } - return sse -} - -// SSECopy transforms a SSE-C encryption into a SSE-C copy -// encryption. This is required for SSE-C key rotation or a SSE-C -// copy where the source and the destination should be encrypted. -// -// If the provided sse is no SSE-C encryption SSECopy returns -// sse unmodified. 
-func SSECopy(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssec); ok { - return ssecCopy(sse) - } - return sse -} - -type ssec [32]byte - -func (s ssec) Type() Type { return SSEC } - -func (s ssec) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(sseCustomerAlgorithm, "AES256") - h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type ssecCopy [32]byte - -func (s ssecCopy) Type() Type { return SSEC } - -func (s ssecCopy) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(sseCopyCustomerAlgorithm, "AES256") - h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type s3 struct{} - -func (s s3) Type() Type { return S3 } - -func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } - -type kms struct { - key string - context []byte - hasContext bool -} - -func (s kms) Type() Type { return KMS } - -func (s kms) Marshal(h http.Header) { - h.Set(sseGenericHeader, "aws:kms") - if s.key != "" { - h.Set(sseKmsKeyID, s.key) - } - if s.hasContext { - h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) - } -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go deleted file mode 100644 index b6f9601b..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go +++ /dev/null @@ -1,303 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package lifecycle contains all the lifecycle related data types and marshallers. -package lifecycle - -import ( - "encoding/xml" - "time" -) - -// AbortIncompleteMultipartUpload structure, not supported yet on MinIO -type AbortIncompleteMultipartUpload struct { - XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"` - DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"` -} - -// IsDaysNull returns true if days field is null -func (n AbortIncompleteMultipartUpload) IsDaysNull() bool { - return n.DaysAfterInitiation == ExpirationDays(0) -} - -// MarshalXML if days after initiation is set to non-zero value -func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.IsDaysNull() { - return nil - } - type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload - return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start) -} - -// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire. -// Upon expiration, server permanently deletes the noncurrent object versions. -// Set this lifecycle configuration action on a bucket that has versioning enabled -// (or suspended) to request server delete noncurrent object versions at a -// specific period in the object's lifetime. 
-type NoncurrentVersionExpiration struct { - XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"` - NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"` -} - -// MarshalXML if non-current days not set to non zero value -func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.IsDaysNull() { - return nil - } - type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration - return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start) -} - -// IsDaysNull returns true if days field is null -func (n NoncurrentVersionExpiration) IsDaysNull() bool { - return n.NoncurrentDays == ExpirationDays(0) -} - -// NoncurrentVersionTransition structure, set this action to request server to -// transition noncurrent object versions to different set storage classes -// at a specific period in the object's lifetime. -type NoncurrentVersionTransition struct { - XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"` - StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` - NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"` -} - -// IsDaysNull returns true if days field is null -func (n NoncurrentVersionTransition) IsDaysNull() bool { - return n.NoncurrentDays == ExpirationDays(0) -} - -// IsStorageClassEmpty returns true if storage class field is empty -func (n NoncurrentVersionTransition) IsStorageClassEmpty() bool { - return n.StorageClass == "" -} - -// MarshalXML is extended to leave out -// tags -func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if n.IsDaysNull() || n.IsStorageClassEmpty() { - return nil - } - type noncurrentVersionTransitionWrapper NoncurrentVersionTransition - return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start) -} - -// Tag structure key/value pair representing an object tag to apply lifecycle configuration -type Tag struct { - XMLName 
xml.Name `xml:"Tag,omitempty" json:"-"` - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - Value string `xml:"Value,omitempty" json:"Value,omitempty"` -} - -// IsEmpty returns whether this tag is empty or not. -func (tag Tag) IsEmpty() bool { - return tag.Key == "" -} - -// Transition structure - transition details of lifecycle configuration -type Transition struct { - XMLName xml.Name `xml:"Transition" json:"-"` - Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` - StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` - Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` -} - -// IsDaysNull returns true if days field is null -func (t Transition) IsDaysNull() bool { - return t.Days == ExpirationDays(0) -} - -// IsDateNull returns true if date field is null -func (t Transition) IsDateNull() bool { - return t.Date.Time.IsZero() -} - -// IsNull returns true if both date and days fields are null -func (t Transition) IsNull() bool { - return t.IsDaysNull() && t.IsDateNull() -} - -// MarshalXML is transition is non null -func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { - if t.IsNull() { - return nil - } - type transitionWrapper Transition - return en.EncodeElement(transitionWrapper(t), startElement) -} - -// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter -type And struct { - XMLName xml.Name `xml:"And" json:"-"` - Prefix string `xml:"Prefix" json:"Prefix,omitempty"` - Tags []Tag `xml:"Tag" json:"Tags,omitempty"` -} - -// IsEmpty returns true if Tags field is null -func (a And) IsEmpty() bool { - return len(a.Tags) == 0 && a.Prefix == "" -} - -// Filter will be used in selecting rule(s) for lifecycle configuration -type Filter struct { - XMLName xml.Name `xml:"Filter" json:"-"` - And And `xml:"And,omitempty" json:"And,omitempty"` - Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` - Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` -} - -// 
MarshalXML - produces the xml representation of the Filter struct -// only one of Prefix, And and Tag should be present in the output. -func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - switch { - case !f.And.IsEmpty(): - if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { - return err - } - case !f.Tag.IsEmpty(): - if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { - return err - } - default: - // Always print Prefix field when both And & Tag are empty - if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// ExpirationDays is a type alias to unmarshal Days in Expiration -type ExpirationDays int - -// MarshalXML encodes number of days to expire if it is non-zero and -// encodes empty string otherwise -func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if eDays == 0 { - return nil - } - return e.EncodeElement(int(eDays), startElement) -} - -// ExpirationDate is a embedded type containing time.Time to unmarshal -// Date in Expiration -type ExpirationDate struct { - time.Time -} - -// MarshalXML encodes expiration date if it is non-zero and encodes -// empty string otherwise -func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if eDate.Time.IsZero() { - return nil - } - return e.EncodeElement(eDate.Format(time.RFC3339), startElement) -} - -// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element. -type ExpireDeleteMarker bool - -// MarshalXML encodes delete marker boolean into an XML form. 
-func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if !b { - return nil - } - type expireDeleteMarkerWrapper ExpireDeleteMarker - return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement) -} - -// IsEnabled returns true if the auto delete-marker expiration is enabled -func (b ExpireDeleteMarker) IsEnabled() bool { - return bool(b) -} - -// Expiration structure - expiration details of lifecycle configuration -type Expiration struct { - XMLName xml.Name `xml:"Expiration,omitempty" json:"-"` - Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"` - Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"` - DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"` -} - -// IsDaysNull returns true if days field is null -func (e Expiration) IsDaysNull() bool { - return e.Days == ExpirationDays(0) -} - -// IsDateNull returns true if date field is null -func (e Expiration) IsDateNull() bool { - return e.Date.Time.IsZero() -} - -// IsDeleteMarkerExpirationEnabled returns true if the auto-expiration of delete marker is enabled -func (e Expiration) IsDeleteMarkerExpirationEnabled() bool { - return e.DeleteMarker.IsEnabled() -} - -// IsNull returns true if both date and days fields are null -func (e Expiration) IsNull() bool { - return e.IsDaysNull() && e.IsDateNull() && !e.IsDeleteMarkerExpirationEnabled() -} - -// MarshalXML is expiration is non null -func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error { - if e.IsNull() { - return nil - } - type expirationWrapper Expiration - return en.EncodeElement(expirationWrapper(e), startElement) -} - -// Rule represents a single rule in lifecycle configuration -type Rule struct { - XMLName xml.Name `xml:"Rule,omitempty" json:"-"` - AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"` - Expiration Expiration 
`xml:"Expiration,omitempty" json:"Expiration,omitempty"` - ID string `xml:"ID" json:"ID"` - RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"` - NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"` - NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"` - Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` - Status string `xml:"Status" json:"Status"` - Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"` -} - -// Configuration is a collection of Rule objects. -type Configuration struct { - XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"` - Rules []Rule `xml:"Rule"` -} - -// Empty check if lifecycle configuration is empty -func (c *Configuration) Empty() bool { - if c == nil { - return true - } - return len(c.Rules) == 0 -} - -// NewConfiguration initializes a fresh lifecycle configuration -// for manipulation, such as setting and removing lifecycle rules -// and filters. -func NewConfiguration() *Configuration { - return &Configuration{} -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go deleted file mode 100644 index d0a47163..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/info.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package notification - -// Indentity represents the user id, this is a compliance field. -type identity struct { - PrincipalID string `json:"principalId"` -} - -// event bucket metadata. -type bucketMeta struct { - Name string `json:"name"` - OwnerIdentity identity `json:"ownerIdentity"` - ARN string `json:"arn"` -} - -// event object metadata. -type objectMeta struct { - Key string `json:"key"` - Size int64 `json:"size,omitempty"` - ETag string `json:"eTag,omitempty"` - ContentType string `json:"contentType,omitempty"` - UserMetadata map[string]string `json:"userMetadata,omitempty"` - VersionID string `json:"versionId,omitempty"` - Sequencer string `json:"sequencer"` -} - -// event server specific metadata. -type eventMeta struct { - SchemaVersion string `json:"s3SchemaVersion"` - ConfigurationID string `json:"configurationId"` - Bucket bucketMeta `json:"bucket"` - Object objectMeta `json:"object"` -} - -// sourceInfo represents information on the client that -// triggered the event notification. -type sourceInfo struct { - Host string `json:"host"` - Port string `json:"port"` - UserAgent string `json:"userAgent"` -} - -// Event represents an Amazon an S3 bucket notification event. 
-type Event struct { - EventVersion string `json:"eventVersion"` - EventSource string `json:"eventSource"` - AwsRegion string `json:"awsRegion"` - EventTime string `json:"eventTime"` - EventName string `json:"eventName"` - UserIdentity identity `json:"userIdentity"` - RequestParameters map[string]string `json:"requestParameters"` - ResponseElements map[string]string `json:"responseElements"` - S3 eventMeta `json:"s3"` - Source sourceInfo `json:"source"` -} - -// Info - represents the collection of notification events, additionally -// also reports errors if any while listening on bucket notifications. -type Info struct { - Records []Event - Err error -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go deleted file mode 100644 index b17e6c54..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/notification/notification.go +++ /dev/null @@ -1,395 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package notification - -import ( - "encoding/xml" - "errors" - "fmt" - - "github.com/minio/minio-go/v7/pkg/set" -) - -// EventType is a S3 notification event associated to the bucket notification configuration -type EventType string - -// The role of all event types are described in : -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations -const ( - ObjectCreatedAll EventType = "s3:ObjectCreated:*" - ObjectCreatedPut = "s3:ObjectCreated:Put" - ObjectCreatedPost = "s3:ObjectCreated:Post" - ObjectCreatedCopy = "s3:ObjectCreated:Copy" - ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - ObjectAccessedGet = "s3:ObjectAccessed:Get" - ObjectAccessedHead = "s3:ObjectAccessed:Head" - ObjectAccessedAll = "s3:ObjectAccessed:*" - ObjectRemovedAll = "s3:ObjectRemoved:*" - ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - BucketCreatedAll = "s3:BucketCreated:*" - BucketRemovedAll = "s3:BucketRemoved:*" -) - -// FilterRule - child of S3Key, a tag in the notification xml which -// carries suffix/prefix filters -type FilterRule struct { - Name string `xml:"Name"` - Value string `xml:"Value"` -} - -// S3Key - child of Filter, a tag in the notification xml which -// carries suffix/prefix filters -type S3Key struct { - FilterRules []FilterRule `xml:"FilterRule,omitempty"` -} - -// Filter - a tag in the notification xml structure which carries -// suffix/prefix filters -type Filter struct { - S3Key S3Key `xml:"S3Key,omitempty"` -} - -// Arn - holds ARN information that will be sent to the web service, -// ARN desciption can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -type Arn struct { - Partition string - Service string - Region string - AccountID string - Resource string -} - -// NewArn 
creates new ARN based on the given partition, service, region, account id and resource -func NewArn(partition, service, region, accountID, resource string) Arn { - return Arn{Partition: partition, - Service: service, - Region: region, - AccountID: accountID, - Resource: resource} -} - -// String returns the string format of the ARN -func (arn Arn) String() string { - return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource -} - -// Config - represents one single notification configuration -// such as topic, queue or lambda configuration. -type Config struct { - ID string `xml:"Id,omitempty"` - Arn Arn `xml:"-"` - Events []EventType `xml:"Event"` - Filter *Filter `xml:"Filter,omitempty"` -} - -// NewConfig creates one notification config and sets the given ARN -func NewConfig(arn Arn) Config { - return Config{Arn: arn, Filter: &Filter{}} -} - -// AddEvents adds one event to the current notification config -func (t *Config) AddEvents(events ...EventType) { - t.Events = append(t.Events, events...) 
-} - -// AddFilterSuffix sets the suffix configuration to the current notification config -func (t *Config) AddFilterSuffix(suffix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "suffix", Value: suffix} - // Replace any suffix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "suffix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// AddFilterPrefix sets the prefix configuration to the current notification config -func (t *Config) AddFilterPrefix(prefix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "prefix", Value: prefix} - // Replace any prefix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "prefix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// EqualEventTypeList tells whether a and b contain the same events -func EqualEventTypeList(a, b []EventType) bool { - if len(a) != len(b) { - return false - } - setA := set.NewStringSet() - for _, i := range a { - setA.Add(string(i)) - } - - setB := set.NewStringSet() - for _, i := range b { - setB.Add(string(i)) - } - - return setA.Difference(setB).IsEmpty() -} - -// EqualFilterRuleList tells whether a and b contain the same filters -func EqualFilterRuleList(a, b []FilterRule) bool { - if len(a) != len(b) { - return false - } - - setA := set.NewStringSet() - for _, i := range a { - setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) - } - - setB := set.NewStringSet() - for _, i := range b { - setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value)) - } - - return setA.Difference(setB).IsEmpty() -} - -// Equal returns whether this 
`Config` is equal to another defined by the passed parameters -func (t *Config) Equal(events []EventType, prefix, suffix string) bool { - if t == nil { - return false - } - - // Compare events - passEvents := EqualEventTypeList(t.Events, events) - - // Compare filters - var newFilterRules []FilterRule - if prefix != "" { - newFilterRules = append(newFilterRules, FilterRule{Name: "prefix", Value: prefix}) - } - if suffix != "" { - newFilterRules = append(newFilterRules, FilterRule{Name: "suffix", Value: suffix}) - } - - var currentFilterRules []FilterRule - if t.Filter != nil { - currentFilterRules = t.Filter.S3Key.FilterRules - } - - passFilters := EqualFilterRuleList(currentFilterRules, newFilterRules) - return passEvents && passFilters -} - -// TopicConfig carries one single topic notification configuration -type TopicConfig struct { - Config - Topic string `xml:"Topic"` -} - -// QueueConfig carries one single queue notification configuration -type QueueConfig struct { - Config - Queue string `xml:"Queue"` -} - -// LambdaConfig carries one single cloudfunction notification configuration -type LambdaConfig struct { - Config - Lambda string `xml:"CloudFunction"` -} - -// Configuration - the struct that represents the whole XML to be sent to the web service -type Configuration struct { - XMLName xml.Name `xml:"NotificationConfiguration"` - LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` - TopicConfigs []TopicConfig `xml:"TopicConfiguration"` - QueueConfigs []QueueConfig `xml:"QueueConfiguration"` -} - -// AddTopic adds a given topic config to the general bucket notification config -func (b *Configuration) AddTopic(topicConfig Config) bool { - newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()} - for _, n := range b.TopicConfigs { - // If new config matches existing one - if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events 
{ - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range topicConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) - return true -} - -// AddQueue adds a given queue config to the general bucket notification config -func (b *Configuration) AddQueue(queueConfig Config) bool { - newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()} - for _, n := range b.QueueConfigs { - if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range queueConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) - return true -} - -// AddLambda adds a given lambda config to the general bucket notification config -func (b *Configuration) AddLambda(lambdaConfig Config) bool { - newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()} - for _, n := range b.LambdaConfigs { - if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range lambdaConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) - return true -} - -// RemoveTopicByArn removes all topic configurations that match the exact specified ARN -func (b *Configuration) RemoveTopicByArn(arn Arn) { - var topics []TopicConfig - for _, topic := range b.TopicConfigs { - if 
topic.Topic != arn.String() { - topics = append(topics, topic) - } - } - b.TopicConfigs = topics -} - -// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete -var ErrNoConfigMatch = errors.New("no notification configuration matched") - -// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix -func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { - removeIndex := -1 - for i, v := range b.TopicConfigs { - // if it matches events and filters, mark the index for deletion - if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) { - removeIndex = i - break // since we have at most one matching config - } - } - if removeIndex >= 0 { - b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...) - return nil - } - return ErrNoConfigMatch -} - -// RemoveQueueByArn removes all queue configurations that match the exact specified ARN -func (b *Configuration) RemoveQueueByArn(arn Arn) { - var queues []QueueConfig - for _, queue := range b.QueueConfigs { - if queue.Queue != arn.String() { - queues = append(queues, queue) - } - } - b.QueueConfigs = queues -} - -// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that match the exact specified ARN, events, prefix and suffix -func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { - removeIndex := -1 - for i, v := range b.QueueConfigs { - // if it matches events and filters, mark the index for deletion - if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) { - removeIndex = i - break // since we have at most one matching config - } - } - if removeIndex >= 0 { - b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...) 
- return nil - } - return ErrNoConfigMatch -} - -// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN -func (b *Configuration) RemoveLambdaByArn(arn Arn) { - var lambdas []LambdaConfig - for _, lambda := range b.LambdaConfigs { - if lambda.Lambda != arn.String() { - lambdas = append(lambdas, lambda) - } - } - b.LambdaConfigs = lambdas -} - -// RemoveLambdaByArnEventsPrefixSuffix removes a topic configuration that match the exact specified ARN, events, prefix and suffix -func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error { - removeIndex := -1 - for i, v := range b.LambdaConfigs { - // if it matches events and filters, mark the index for deletion - if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) { - removeIndex = i - break // since we have at most one matching config - } - } - if removeIndex >= 0 { - b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...) - return nil - } - return ErrNoConfigMatch -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go deleted file mode 100644 index beacc71f..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ /dev/null @@ -1,696 +0,0 @@ -/* - * MinIO Client (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package replication - -import ( - "bytes" - "encoding/xml" - "fmt" - "strconv" - "strings" - "unicode/utf8" - - "github.com/rs/xid" -) - -var errInvalidFilter = fmt.Errorf("invalid filter") - -// OptionType specifies operation to be performed on config -type OptionType string - -const ( - // AddOption specifies addition of rule to config - AddOption OptionType = "Add" - // SetOption specifies modification of existing rule to config - SetOption OptionType = "Set" - - // RemoveOption specifies rule options are for removing a rule - RemoveOption OptionType = "Remove" - // ImportOption is for getting current config - ImportOption OptionType = "Import" -) - -// Options represents options to set a replication configuration rule -type Options struct { - Op OptionType - ID string - Prefix string - RuleStatus string - Priority string - TagString string - StorageClass string - RoleArn string - DestBucket string - IsTagSet bool - IsSCSet bool - ReplicateDeletes string // replicate versioned deletes - ReplicateDeleteMarkers string // replicate soft deletes - ReplicaSync string // replicate replica metadata modifications - ExistingObjectReplicate string -} - -// Tags returns a slice of tags for a rule -func (opts Options) Tags() ([]Tag, error) { - var tagList []Tag - tagTokens := strings.Split(opts.TagString, "&") - for _, tok := range tagTokens { - if tok == "" { - break - } - kv := strings.SplitN(tok, "=", 2) - if len(kv) != 2 { - return []Tag{}, fmt.Errorf("tags should be entered as comma separated k=v pairs") - } - tagList = append(tagList, Tag{ - Key: kv[0], - Value: kv[1], - }) - } - return tagList, nil -} - -// Config - replication configuration specified in -// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html -type Config struct { - XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"` - Rules []Rule `xml:"Rule" json:"Rules"` 
- Role string `xml:"Role" json:"Role"` -} - -// Empty returns true if config is not set -func (c *Config) Empty() bool { - return len(c.Rules) == 0 -} - -// AddRule adds a new rule to existing replication config. If a rule exists with the -// same ID, then the rule is replaced. -func (c *Config) AddRule(opts Options) error { - priority, err := strconv.Atoi(opts.Priority) - if err != nil { - return err - } - if opts.RoleArn != c.Role && c.Role != "" { - return fmt.Errorf("role ARN does not match existing configuration") - } - var status Status - // toggle rule status for edit option - switch opts.RuleStatus { - case "enable": - status = Enabled - case "disable": - status = Disabled - default: - return fmt.Errorf("rule state should be either [enable|disable]") - } - - tags, err := opts.Tags() - if err != nil { - return err - } - andVal := And{ - Tags: tags, - } - filter := Filter{Prefix: opts.Prefix} - // only a single tag is set. - if opts.Prefix == "" && len(tags) == 1 { - filter.Tag = tags[0] - } - // both prefix and tag are present - if len(andVal.Tags) > 1 || opts.Prefix != "" { - filter.And = andVal - filter.And.Prefix = opts.Prefix - filter.Prefix = "" - filter.Tag = Tag{} - } - if opts.ID == "" { - opts.ID = xid.New().String() - } - arnStr := opts.RoleArn - if opts.RoleArn == "" { - arnStr = c.Role - } - if arnStr == "" { - return fmt.Errorf("role ARN required") - } - tokens := strings.Split(arnStr, ":") - if len(tokens) != 6 { - return fmt.Errorf("invalid format for replication Arn") - } - if c.Role == "" { - c.Role = arnStr - } - destBucket := opts.DestBucket - // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html - if btokens := strings.Split(destBucket, ":"); len(btokens) != 6 { - if len(btokens) == 1 { - destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) - } else { - return fmt.Errorf("destination bucket needs to be in Arn format") - } - } - dmStatus := Disabled - if opts.ReplicateDeleteMarkers != "" { - switch 
opts.ReplicateDeleteMarkers { - case "enable": - dmStatus = Enabled - case "disable": - dmStatus = Disabled - default: - return fmt.Errorf("ReplicateDeleteMarkers should be either enable|disable") - } - } - - vDeleteStatus := Disabled - if opts.ReplicateDeletes != "" { - switch opts.ReplicateDeletes { - case "enable": - vDeleteStatus = Enabled - case "disable": - vDeleteStatus = Disabled - default: - return fmt.Errorf("ReplicateDeletes should be either enable|disable") - } - } - var replicaSync Status - // replica sync is by default Enabled, unless specified. - switch opts.ReplicaSync { - case "enable", "": - replicaSync = Enabled - case "disable": - replicaSync = Disabled - default: - return fmt.Errorf("replica metadata sync should be either [enable|disable]") - } - - var existingStatus Status - if opts.ExistingObjectReplicate != "" { - switch opts.ExistingObjectReplicate { - case "enable": - existingStatus = Enabled - case "disable", "": - existingStatus = Disabled - default: - return fmt.Errorf("existingObjectReplicate should be either enable|disable") - } - } - newRule := Rule{ - ID: opts.ID, - Priority: priority, - Status: status, - Filter: filter, - Destination: Destination{ - Bucket: destBucket, - StorageClass: opts.StorageClass, - }, - DeleteMarkerReplication: DeleteMarkerReplication{Status: dmStatus}, - DeleteReplication: DeleteReplication{Status: vDeleteStatus}, - // MinIO enables replica metadata syncing by default in the case of bi-directional replication to allow - // automatic failover as the expectation in this case is that replica and source should be identical. 
- // However AWS leaves this configurable https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-for-metadata-changes.html - SourceSelectionCriteria: SourceSelectionCriteria{ - ReplicaModifications: ReplicaModifications{ - Status: replicaSync, - }, - }, - // By default disable existing object replication unless selected - ExistingObjectReplication: ExistingObjectReplication{ - Status: existingStatus, - }, - } - - // validate rule after overlaying priority for pre-existing rule being disabled. - if err := newRule.Validate(); err != nil { - return err - } - for _, rule := range c.Rules { - if rule.Priority == newRule.Priority { - return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") - } - if rule.Destination.Bucket != newRule.Destination.Bucket { - return fmt.Errorf("the destination bucket must be same for all rules") - } - if rule.ID == newRule.ID { - return fmt.Errorf("a rule exists with this ID") - } - } - - c.Rules = append(c.Rules, newRule) - return nil -} - -// EditRule modifies an existing rule in replication config -func (c *Config) EditRule(opts Options) error { - if opts.ID == "" { - return fmt.Errorf("rule ID missing") - } - rIdx := -1 - var newRule Rule - for i, rule := range c.Rules { - if rule.ID == opts.ID { - rIdx = i - newRule = rule - break - } - } - if rIdx < 0 { - return fmt.Errorf("rule with ID %s not found in replication configuration", opts.ID) - } - prefixChg := opts.Prefix != newRule.Prefix() - if opts.IsTagSet || prefixChg { - prefix := newRule.Prefix() - if prefix != opts.Prefix { - prefix = opts.Prefix - } - tags := []Tag{newRule.Filter.Tag} - if len(newRule.Filter.And.Tags) != 0 { - tags = newRule.Filter.And.Tags - } - var err error - if opts.IsTagSet { - tags, err = opts.Tags() - if err != nil { - return err - } - } - andVal := And{ - Tags: tags, - } - - filter := Filter{Prefix: prefix} - // only a single tag is set. 
- if prefix == "" && len(tags) == 1 { - filter.Tag = tags[0] - } - // both prefix and tag are present - if len(andVal.Tags) > 1 || prefix != "" { - filter.And = andVal - filter.And.Prefix = prefix - filter.Prefix = "" - filter.Tag = Tag{} - } - newRule.Filter = filter - } - - // toggle rule status for edit option - if opts.RuleStatus != "" { - switch opts.RuleStatus { - case "enable": - newRule.Status = Enabled - case "disable": - newRule.Status = Disabled - default: - return fmt.Errorf("rule state should be either [enable|disable]") - } - } - // set DeleteMarkerReplication rule status for edit option - if opts.ReplicateDeleteMarkers != "" { - switch opts.ReplicateDeleteMarkers { - case "enable": - newRule.DeleteMarkerReplication.Status = Enabled - case "disable": - newRule.DeleteMarkerReplication.Status = Disabled - default: - return fmt.Errorf("ReplicateDeleteMarkers state should be either [enable|disable]") - } - } - - // set DeleteReplication rule status for edit option. This is a MinIO specific - // option to replicate versioned deletes - if opts.ReplicateDeletes != "" { - switch opts.ReplicateDeletes { - case "enable": - newRule.DeleteReplication.Status = Enabled - case "disable": - newRule.DeleteReplication.Status = Disabled - default: - return fmt.Errorf("ReplicateDeletes state should be either [enable|disable]") - } - } - - if opts.ReplicaSync != "" { - switch opts.ReplicaSync { - case "enable", "": - newRule.SourceSelectionCriteria.ReplicaModifications.Status = Enabled - case "disable": - newRule.SourceSelectionCriteria.ReplicaModifications.Status = Disabled - default: - return fmt.Errorf("replica metadata sync should be either [enable|disable]") - } - } - fmt.Println("opts.ExistingObjectReplicate>", opts.ExistingObjectReplicate) - if opts.ExistingObjectReplicate != "" { - switch opts.ExistingObjectReplicate { - case "enable": - newRule.ExistingObjectReplication.Status = Enabled - case "disable": - newRule.ExistingObjectReplication.Status = Disabled - 
default: - return fmt.Errorf("existingObjectsReplication state should be either [enable|disable]") - } - } - if opts.IsSCSet { - newRule.Destination.StorageClass = opts.StorageClass - } - if opts.Priority != "" { - priority, err := strconv.Atoi(opts.Priority) - if err != nil { - return err - } - newRule.Priority = priority - } - if opts.DestBucket != "" { - destBucket := opts.DestBucket - // ref https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html - if btokens := strings.Split(opts.DestBucket, ":"); len(btokens) != 6 { - if len(btokens) == 1 { - destBucket = fmt.Sprintf("arn:aws:s3:::%s", destBucket) - } else { - return fmt.Errorf("destination bucket needs to be in Arn format") - } - } - newRule.Destination.Bucket = destBucket - } - // validate rule - if err := newRule.Validate(); err != nil { - return err - } - // ensure priority and destination bucket restrictions are not violated - for idx, rule := range c.Rules { - if rule.Priority == newRule.Priority && rIdx != idx { - return fmt.Errorf("priority must be unique. Replication configuration already has a rule with this priority") - } - if rule.Destination.Bucket != newRule.Destination.Bucket { - return fmt.Errorf("the destination bucket must be same for all rules") - } - } - - c.Rules[rIdx] = newRule - return nil -} - -// RemoveRule removes a rule from replication config. -func (c *Config) RemoveRule(opts Options) error { - var newRules []Rule - ruleFound := false - for _, rule := range c.Rules { - if rule.ID != opts.ID { - newRules = append(newRules, rule) - continue - } - ruleFound = true - } - if !ruleFound { - return fmt.Errorf("Rule with ID %s not found", opts.ID) - } - if len(newRules) == 0 { - return fmt.Errorf("replication configuration should have at least one rule") - } - c.Rules = newRules - return nil - -} - -// Rule - a rule for replication configuration. 
-type Rule struct { - XMLName xml.Name `xml:"Rule" json:"-"` - ID string `xml:"ID,omitempty"` - Status Status `xml:"Status"` - Priority int `xml:"Priority"` - DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"` - DeleteReplication DeleteReplication `xml:"DeleteReplication"` - Destination Destination `xml:"Destination"` - Filter Filter `xml:"Filter" json:"Filter"` - SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"` - ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"` -} - -// Validate validates the rule for correctness -func (r Rule) Validate() error { - if err := r.validateID(); err != nil { - return err - } - if err := r.validateStatus(); err != nil { - return err - } - if err := r.validateFilter(); err != nil { - return err - } - - if r.Priority < 0 && r.Status == Enabled { - return fmt.Errorf("priority must be set for the rule") - } - - if err := r.validateStatus(); err != nil { - return err - } - return r.ExistingObjectReplication.Validate() -} - -// validateID - checks if ID is valid or not. -func (r Rule) validateID() error { - // cannot be longer than 255 characters - if len(r.ID) > 255 { - return fmt.Errorf("ID must be less than 255 characters") - } - return nil -} - -// validateStatus - checks if status is valid or not. -func (r Rule) validateStatus() error { - // Status can't be empty - if len(r.Status) == 0 { - return fmt.Errorf("status cannot be empty") - } - - // Status must be one of Enabled or Disabled - if r.Status != Enabled && r.Status != Disabled { - return fmt.Errorf("status must be set to either Enabled or Disabled") - } - return nil -} - -func (r Rule) validateFilter() error { - if err := r.Filter.Validate(); err != nil { - return err - } - return nil -} - -// Prefix - a rule can either have prefix under or under -// . 
This method returns the prefix from the -// location where it is available -func (r Rule) Prefix() string { - if r.Filter.Prefix != "" { - return r.Filter.Prefix - } - return r.Filter.And.Prefix -} - -// Tags - a rule can either have tag under or under -// . This method returns all the tags from the -// rule in the format tag1=value1&tag2=value2 -func (r Rule) Tags() string { - ts := []Tag{r.Filter.Tag} - if len(r.Filter.And.Tags) != 0 { - ts = r.Filter.And.Tags - } - - var buf bytes.Buffer - for _, t := range ts { - if buf.Len() > 0 { - buf.WriteString("&") - } - buf.WriteString(t.String()) - } - return buf.String() -} - -// Filter - a filter for a replication configuration Rule. -type Filter struct { - XMLName xml.Name `xml:"Filter" json:"-"` - Prefix string `json:"Prefix,omitempty"` - And And `xml:"And,omitempty" json:"And,omitempty"` - Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` -} - -// Validate - validates the filter element -func (f Filter) Validate() error { - // A Filter must have exactly one of Prefix, Tag, or And specified. - if !f.And.isEmpty() { - if f.Prefix != "" { - return errInvalidFilter - } - if !f.Tag.IsEmpty() { - return errInvalidFilter - } - } - if f.Prefix != "" { - if !f.Tag.IsEmpty() { - return errInvalidFilter - } - } - if !f.Tag.IsEmpty() { - if err := f.Tag.Validate(); err != nil { - return err - } - } - return nil -} - -// Tag - a tag for a replication configuration Rule filter. -type Tag struct { - XMLName xml.Name `json:"-"` - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - Value string `xml:"Value,omitempty" json:"Value,omitempty"` -} - -func (tag Tag) String() string { - if tag.IsEmpty() { - return "" - } - return tag.Key + "=" + tag.Value -} - -// IsEmpty returns whether this tag is empty or not. -func (tag Tag) IsEmpty() bool { - return tag.Key == "" -} - -// Validate checks this tag. 
-func (tag Tag) Validate() error { - if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { - return fmt.Errorf("invalid Tag Key") - } - - if utf8.RuneCountInString(tag.Value) > 256 { - return fmt.Errorf("invalid Tag Value") - } - return nil -} - -// Destination - destination in ReplicationConfiguration. -type Destination struct { - XMLName xml.Name `xml:"Destination" json:"-"` - Bucket string `xml:"Bucket" json:"Bucket"` - StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"` -} - -// And - a tag to combine a prefix and multiple tags for replication configuration rule. -type And struct { - XMLName xml.Name `xml:"And,omitempty" json:"-"` - Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"` - Tags []Tag `xml:"Tag,omitempty" json:"Tag,omitempty"` -} - -// isEmpty returns true if Tags field is null -func (a And) isEmpty() bool { - return len(a.Tags) == 0 && a.Prefix == "" -} - -// Status represents Enabled/Disabled status -type Status string - -// Supported status types -const ( - Enabled Status = "Enabled" - Disabled Status = "Disabled" -) - -// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html -type DeleteMarkerReplication struct { - Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default -} - -// IsEmpty returns true if DeleteMarkerReplication is not set -func (d DeleteMarkerReplication) IsEmpty() bool { - return len(d.Status) == 0 -} - -// DeleteReplication - whether versioned deletes are replicated - this -// is a MinIO specific extension -type DeleteReplication struct { - Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default -} - -// IsEmpty returns true if DeleteReplication is not set -func (d DeleteReplication) IsEmpty() bool { - return len(d.Status) == 0 -} - -// ReplicaModifications specifies if replica modification sync is enabled -type ReplicaModifications 
struct { - Status Status `xml:"Status" json:"Status"` // should be set to "Enabled" by default -} - -// SourceSelectionCriteria - specifies additional source selection criteria in ReplicationConfiguration. -type SourceSelectionCriteria struct { - ReplicaModifications ReplicaModifications `xml:"ReplicaModifications" json:"ReplicaModifications"` -} - -// IsValid - checks whether SourceSelectionCriteria is valid or not. -func (s SourceSelectionCriteria) IsValid() bool { - return s.ReplicaModifications.Status == Enabled || s.ReplicaModifications.Status == Disabled -} - -// Validate source selection criteria -func (s SourceSelectionCriteria) Validate() error { - if (s == SourceSelectionCriteria{}) { - return nil - } - if !s.IsValid() { - return fmt.Errorf("invalid ReplicaModification status") - } - return nil -} - -// ExistingObjectReplication - whether existing object replication is enabled -type ExistingObjectReplication struct { - Status Status `xml:"Status"` // should be set to "Disabled" by default -} - -// IsEmpty returns true if DeleteMarkerReplication is not set -func (e ExistingObjectReplication) IsEmpty() bool { - return len(e.Status) == 0 -} - -// Validate validates whether the status is disabled. 
-func (e ExistingObjectReplication) Validate() error { - if e.IsEmpty() { - return nil - } - if e.Status != Disabled && e.Status != Enabled { - return fmt.Errorf("invalid ExistingObjectReplication status") - } - return nil -} - -// Metrics represents inline replication metrics -// such as pending, failed and completed bytes in total for a bucket -type Metrics struct { - // Pending size in bytes - PendingSize uint64 `json:"pendingReplicationSize"` - // Completed size in bytes - ReplicatedSize uint64 `json:"completedReplicationSize"` - // Total Replica size in bytes - ReplicaSize uint64 `json:"replicaSize"` - // Failed size in bytes - FailedSize uint64 `json:"failedReplicationSize"` - // Total number of pending operations including metadata updates - PendingCount uint64 `json:"pendingReplicationCount"` - // Total number of failed operations including metadata updates - FailedCount uint64 `json:"failedReplicationCount"` -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go deleted file mode 100644 index fea25d6e..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package s3utils - -import ( - "bytes" - "encoding/hex" - "errors" - "net" - "net/url" - "regexp" - "sort" - "strings" - "unicode/utf8" -) - -// Sentinel URL is the default url value which is invalid. -var sentinelURL = url.URL{} - -// IsValidDomain validates if input string is a valid domain name. -func IsValidDomain(host string) bool { - // See RFC 1035, RFC 3696. - host = strings.TrimSpace(host) - if len(host) == 0 || len(host) > 255 { - return false - } - // host cannot start or end with "-" - if host[len(host)-1:] == "-" || host[:1] == "-" { - return false - } - // host cannot start or end with "_" - if host[len(host)-1:] == "_" || host[:1] == "_" { - return false - } - // host cannot start with a "." - if host[:1] == "." { - return false - } - // All non alphanumeric characters are invalid. - if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:> 1 { - return parts[1] - } - parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint. -func IsAliyunOSSEndpoint(endpointURL url.URL) bool { - return strings.HasSuffix(endpointURL.Host, "aliyuncs.com") -} - -// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint. -func IsAmazonEndpoint(endpointURL url.URL) bool { - if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" { - return true - } - return GetRegionFromURL(endpointURL) != "" -} - -// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint. 
-func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || - IsAmazonFIPSGovCloudEndpoint(endpointURL)) -} - -// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || - endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" -} - -// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - switch endpointURL.Host { - case "s3-fips.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-west-1.amazonaws.com": - case "s3-fips.dualstack.us-west-2.amazonaws.com": - case "s3-fips.dualstack.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-east-1.amazonaws.com": - case "s3-fips.us-west-1.amazonaws.com": - case "s3-fips.us-west-2.amazonaws.com": - case "s3-fips.us-east-1.amazonaws.com": - default: - return false - } - return true -} - -// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { - return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) -} - -// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. 
-func IsGoogleEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return endpointURL.Host == "storage.googleapis.com" -} - -// Expects ascii encoded strings - from output of urlEncodePath -func percentEncodeSlash(s string) string { - return strings.Replace(s, "/", "%2F", -1) -} - -// QueryEncode - encodes query values in their URL encoded form. In -// addition to the percent encoding performed by urlEncodePath() used -// here, it also percent encodes '/' (forward slash) -func QueryEncode(v url.Values) string { - if v == nil { - return "" - } - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := percentEncodeSlash(EncodePath(k)) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(percentEncodeSlash(EncodePath(v))) - } - } - return buf.String() -} - -// TagDecode - decodes canonical tag into map of key and value. -func TagDecode(ctag string) map[string]string { - if ctag == "" { - return map[string]string{} - } - tags := strings.Split(ctag, "&") - tagMap := make(map[string]string, len(tags)) - var err error - for _, tag := range tags { - kvs := strings.SplitN(tag, "=", 2) - if len(kvs) == 0 { - return map[string]string{} - } - if len(kvs) == 1 { - return map[string]string{} - } - tagMap[kvs[0]], err = url.PathUnescape(kvs[1]) - if err != nil { - continue - } - } - return tagMap -} - -// TagEncode - encodes tag values in their URL encoded form. 
In -// addition to the percent encoding performed by urlEncodePath() used -// here, it also percent encodes '/' (forward slash) -func TagEncode(tags map[string]string) string { - if tags == nil { - return "" - } - values := url.Values{} - for k, v := range tags { - values[k] = []string{v} - } - return QueryEncode(values) -} - -// if object matches reserved string, no need to encode them -var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - -// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. -func EncodePath(pathName string) string { - if reservedObjectNames.MatchString(pathName) { - return pathName - } - var encodedPathname strings.Builder - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname.WriteRune(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname.WriteRune(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname.WriteString("%" + strings.ToUpper(hex)) - } - } - } - return encodedPathname.String() -} - -// We support '.' with bucket names but we fallback to using path -// style requests instead for such buckets. 
-var ( - validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) - validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) - ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) -) - -// Common checker for both stricter and basic validation. -func checkBucketNameCommon(bucketName string, strict bool) (err error) { - if strings.TrimSpace(bucketName) == "" { - return errors.New("Bucket name cannot be empty") - } - if len(bucketName) < 3 { - return errors.New("Bucket name cannot be shorter than 3 characters") - } - if len(bucketName) > 63 { - return errors.New("Bucket name cannot be longer than 63 characters") - } - if ipAddress.MatchString(bucketName) { - return errors.New("Bucket name cannot be an ip address") - } - if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { - return errors.New("Bucket name contains invalid characters") - } - if strict { - if !validBucketNameStrict.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err - } - if !validBucketName.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err -} - -// CheckValidBucketName - checks if we have a valid input bucket name. -func CheckValidBucketName(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, false) -} - -// CheckValidBucketNameStrict - checks if we have a valid input bucket name. -// This is a stricter version. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func CheckValidBucketNameStrict(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, true) -} - -// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. 
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectNamePrefix(objectName string) error { - if len(objectName) > 1024 { - return errors.New("Object name cannot be longer than 1024 characters") - } - if !utf8.ValidString(objectName) { - return errors.New("Object name with non UTF-8 strings are not supported") - } - return nil -} - -// CheckValidObjectName - checks if we have a valid input object name. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectName(objectName string) error { - if strings.TrimSpace(objectName) == "" { - return errors.New("Object name cannot be empty") - } - return CheckValidObjectNamePrefix(objectName) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go deleted file mode 100644 index c35e58e1..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/set/stringset.go +++ /dev/null @@ -1,200 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package set - -import ( - "fmt" - "sort" - - jsoniter "github.com/json-iterator/go" -) - -// StringSet - uses map as set of strings. -type StringSet map[string]struct{} - -var json = jsoniter.ConfigCompatibleWithStandardLibrary - -// ToSlice - returns StringSet as string slice. 
-func (set StringSet) ToSlice() []string { - keys := make([]string, 0, len(set)) - for k := range set { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// IsEmpty - returns whether the set is empty or not. -func (set StringSet) IsEmpty() bool { - return len(set) == 0 -} - -// Add - adds string to the set. -func (set StringSet) Add(s string) { - set[s] = struct{}{} -} - -// Remove - removes string in the set. It does nothing if string does not exist in the set. -func (set StringSet) Remove(s string) { - delete(set, s) -} - -// Contains - checks if string is in the set. -func (set StringSet) Contains(s string) bool { - _, ok := set[s] - return ok -} - -// FuncMatch - returns new set containing each value who passes match function. -// A 'matchFn' should accept element in a set as first argument and -// 'matchString' as second argument. The function can do any logic to -// compare both the arguments and should return true to accept element in -// a set to include in output set else the element is ignored. -func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { - nset := NewStringSet() - for k := range set { - if matchFn(k, matchString) { - nset.Add(k) - } - } - return nset -} - -// ApplyFunc - returns new set containing each value processed by 'applyFn'. -// A 'applyFn' should accept element in a set as a argument and return -// a processed string. The function can do any logic to return a processed -// string. -func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(applyFn(k)) - } - return nset -} - -// Equals - checks whether given set is equal to current set or not. -func (set StringSet) Equals(sset StringSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. 
- if len(set) != len(sset) { - return false - } - - // As both sets are equal in length, check each elements are equal. - for k := range set { - if _, ok := sset[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns the intersection with given set as new set. -func (set StringSet) Intersection(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// Difference - returns the difference with given set as new set. -func (set StringSet) Difference(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; !ok { - nset.Add(k) - } - } - - return nset -} - -// Union - returns the union with given set as new set. -func (set StringSet) Union(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(k) - } - - for k := range sset { - nset.Add(k) - } - - return nset -} - -// MarshalJSON - converts to JSON data. -func (set StringSet) MarshalJSON() ([]byte, error) { - return json.Marshal(set.ToSlice()) -} - -// UnmarshalJSON - parses JSON data and creates new set with it. -// If 'data' contains JSON string array, the set contains each string. -// If 'data' contains JSON string, the set contains the string as one element. -// If 'data' contains Other JSON types, JSON parse error is returned. -func (set *StringSet) UnmarshalJSON(data []byte) error { - sl := []string{} - var err error - if err = json.Unmarshal(data, &sl); err == nil { - *set = make(StringSet) - for _, s := range sl { - set.Add(s) - } - } else { - var s string - if err = json.Unmarshal(data, &s); err == nil { - *set = make(StringSet) - set.Add(s) - } - } - - return err -} - -// String - returns printable string of the set. -func (set StringSet) String() string { - return fmt.Sprintf("%s", set.ToSlice()) -} - -// NewStringSet - creates new string set. 
-func NewStringSet() StringSet { - return make(StringSet) -} - -// CreateStringSet - creates new string set with given string values. -func CreateStringSet(sl ...string) StringSet { - set := make(StringSet) - for _, k := range sl { - set.Add(k) - } - return set -} - -// CopyStringSet - returns copy of given set. -func CopyStringSet(set StringSet) StringSet { - nset := NewStringSet() - for k, v := range set { - nset[k] = v - } - return nset -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go deleted file mode 100644 index 7b2ca91d..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-streaming.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package signer - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "strconv" - "strings" - "time" -) - -// Reference for constants used below - -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming -const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF -) - -// Request headers to be ignored while calculating seed signature for -// a request. -var ignoredStreamingHeaders = map[string]bool{ - "Authorization": true, - "User-Agent": true, - "Content-Type": true, -} - -// getSignedChunkLength - calculates the length of chunk metadata -func getSignedChunkLength(chunkDataSize int64) int64 { - return int64(len(fmt.Sprintf("%x", chunkDataSize))) + - chunkSigConstLen + - signatureStrLen + - crlfLen + - chunkDataSize + - crlfLen -} - -// getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { - if dataLen <= 0 { - return 0 - } - - chunksCount := int64(dataLen / chunkSize) - remainingBytes := int64(dataLen % chunkSize) - streamLen := int64(0) - streamLen += chunksCount * getSignedChunkLength(chunkSize) - if remainingBytes > 0 { - streamLen += getSignedChunkLength(remainingBytes) - } - streamLen += getSignedChunkLength(0) - return streamLen -} - -// buildChunkStringToSign - returns the string to sign given chunk data -// and previous signature. 
-func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { - stringToSignParts := []string{ - streamingPayloadHdr, - t.Format(iso8601DateFormat), - getScope(region, t, ServiceTypeS3), - previousSig, - emptySHA256, - hex.EncodeToString(sum256(chunkData)), - } - - return strings.Join(stringToSignParts, "\n") -} - -// prepareStreamingRequest - prepares a request with appropriate -// headers before computing the seed signature. -func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { - // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) - if sessionToken != "" { - req.Header.Set("X-Amz-Security-Token", sessionToken) - } - - req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) - // Set content length with streaming signature for each chunk included. - req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) - req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) -} - -// buildChunkHeader - returns the chunk header. -// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n -func buildChunkHeader(chunkLen int64, signature string) []byte { - return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") -} - -// buildChunkSignature - returns chunk signature for a given chunk and previous signature. -func buildChunkSignature(chunkData []byte, reqTime time.Time, region, - previousSignature, secretAccessKey string) string { - - chunkStringToSign := buildChunkStringToSign(reqTime, region, - previousSignature, chunkData) - signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3) - return getSignature(signingKey, chunkStringToSign) -} - -// getSeedSignature - returns the seed signature for a given request. 
-func (s *StreamingReader) setSeedSignature(req *http.Request) { - // Get canonical request - canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req)) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3) - - signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3) - - // Calculate signature. - s.seedSignature = getSignature(signingKey, stringToSign) -} - -// StreamingReader implements chunked upload signature as a reader on -// top of req.Body's ReaderCloser chunk header;data;... repeat -type StreamingReader struct { - accessKeyID string - secretAccessKey string - sessionToken string - region string - prevSignature string - seedSignature string - contentLen int64 // Content-Length from req header - baseReadCloser io.ReadCloser // underlying io.Reader - bytesRead int64 // bytes read from underlying io.Reader - buf bytes.Buffer // holds signed chunk - chunkBuf []byte // holds raw data read from req Body - chunkBufLen int // no. of bytes read so far into chunkBuf - done bool // done reading the underlying reader to EOF - reqTime time.Time - chunkNum int - totalChunks int - lastChunkSize int -} - -// signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { - // Compute chunk signature for next header - signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, - s.region, s.prevSignature, s.secretAccessKey) - - // For next chunk signature computation - s.prevSignature = signature - - // Write chunk header into streaming buffer - chunkHdr := buildChunkHeader(int64(chunkLen), signature) - s.buf.Write(chunkHdr) - - // Write chunk data into streaming buffer - s.buf.Write(s.chunkBuf[:chunkLen]) - - // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) - - // Reset chunkBufLen for next chunk read. 
- s.chunkBufLen = 0 - s.chunkNum++ -} - -// setStreamingAuthHeader - builds and sets authorization header value -// for streaming signature. -func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { - credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3) - authParts := []string{ - signV4Algorithm + " Credential=" + credential, - "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), - "Signature=" + s.seedSignature, - } - - // Set authorization header. - auth := strings.Join(authParts, ",") - req.Header.Set("Authorization", auth) -} - -// StreamingSignV4 - provides chunked upload signatureV4 support by -// implementing io.Reader. -func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, - region string, dataLen int64, reqTime time.Time) *http.Request { - - // Set headers needed for streaming signature. - prepareStreamingRequest(req, sessionToken, dataLen, reqTime) - - if req.Body == nil { - req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) - } - - stReader := &StreamingReader{ - baseReadCloser: req.Body, - accessKeyID: accessKeyID, - secretAccessKey: secretAccessKey, - sessionToken: sessionToken, - region: region, - reqTime: reqTime, - chunkBuf: make([]byte, payloadChunkSize), - contentLen: dataLen, - chunkNum: 1, - totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, - lastChunkSize: int(dataLen % payloadChunkSize), - } - - // Add the request headers required for chunk upload signing. - - // Compute the seed signature. - stReader.setSeedSignature(req) - - // Set the authorization header with the seed signature. - stReader.setStreamingAuthHeader(req) - - // Set seed signature as prevSignature for subsequent - // streaming signing process. - stReader.prevSignature = stReader.seedSignature - req.Body = stReader - - return req -} - -// Read - this method performs chunk upload signature providing a -// io.Reader interface. 
-func (s *StreamingReader) Read(buf []byte) (int, error) { - switch { - // After the last chunk is read from underlying reader, we - // never re-fill s.buf. - case s.done: - - // s.buf will be (re-)filled with next chunk when has lesser - // bytes than asked for. - case s.buf.Len() < len(buf): - s.chunkBufLen = 0 - for { - n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) - // Usually we validate `err` first, but in this case - // we are validating n > 0 for the following reasons. - // - // 1. n > 0, err is one of io.EOF, nil (near end of stream) - // A Reader returning a non-zero number of bytes at the end - // of the input stream may return either err == EOF or err == nil - // - // 2. n == 0, err is io.EOF (actual end of stream) - // - // Callers should always process the n > 0 bytes returned - // before considering the error err. - if n1 > 0 { - s.chunkBufLen += n1 - s.bytesRead += int64(n1) - - if s.chunkBufLen == payloadChunkSize || - (s.chunkNum == s.totalChunks-1 && - s.chunkBufLen == s.lastChunkSize) { - // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) - break - } - } - if err != nil { - if err == io.EOF { - // No more data left in baseReader - last chunk. - // Done reading the last chunk from baseReader. - s.done = true - - // bytes read from baseReader different than - // content length provided. - if s.bytesRead != s.contentLen { - return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead) - } - - // Sign the chunk and write it to s.buf. - s.signChunk(0) - break - } - return 0, err - } - - } - } - return s.buf.Read(buf) -} - -// Close - this method makes underlying io.ReadCloser's Close method available. 
-func (s *StreamingReader) Close() error { - return s.baseReadCloser.Close() -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go deleted file mode 100644 index 71821a26..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v2.go +++ /dev/null @@ -1,317 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package signer - -import ( - "bytes" - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// Signature and API related constants. -const ( - signV2Algorithm = "AWS" -) - -// Encode input URL path to URL encoded path. -func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { - if virtualHost { - reqHost := getHostAddr(req) - dotPos := strings.Index(reqHost, ".") - if dotPos > -1 { - bucketName := reqHost[:dotPos] - path = "/" + bucketName - path += req.URL.Path - path = s3utils.EncodePath(path) - return - } - } - path = s3utils.EncodePath(req.URL.Path) - return -} - -// PreSignV2 - presign the request in following style. -// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. 
-func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { - // Presign is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - d := time.Now().UTC() - // Find epoch expires when the request will expire. - epochExpires := d.Unix() + expires - - // Add expires header if not present. - if expiresStr := req.Header.Get("Expires"); expiresStr == "" { - req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) - } - - // Get presigned string to sign. - stringToSign := preStringToSignV2(req, virtualHost) - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(stringToSign)) - - // Calculate signature. - signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) - - query := req.URL.Query() - // Handle specially for Google Cloud Storage. - if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { - query.Set("GoogleAccessId", accessKeyID) - } else { - query.Set("AWSAccessKeyId", accessKeyID) - } - - // Fill in Expires for presigned query. - query.Set("Expires", strconv.FormatInt(epochExpires, 10)) - - // Encode query and save. - req.URL.RawQuery = s3utils.QueryEncode(query) - - // Save signature finally. - req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) - - // Return. - return &req -} - -// PostPresignSignatureV2 - presigned signature for PostPolicy -// request. 
-func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(policyBase64)) - signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) - return signature -} - -// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; -// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -// -// CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -// -// CanonicalizedProtocolHeaders = - -// SignV2 sign the request before Do() (AWS Signature Version 2). -func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { - // Signature calculation is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - // Initial time. - d := time.Now().UTC() - - // Add date if not present. - if date := req.Header.Get("Date"); date == "" { - req.Header.Set("Date", d.Format(http.TimeFormat)) - } - - // Calculate HMAC for secretAccessKey. - stringToSign := stringToSignV2(req, virtualHost) - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(stringToSign)) - - // Prepare auth header. - authHeader := new(bytes.Buffer) - authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) - encoder := base64.NewEncoder(base64.StdEncoding, authHeader) - encoder.Write(hm.Sum(nil)) - encoder.Close() - - // Set Authorization header. 
- req.Header.Set("Authorization", authHeader.String()) - - return &req -} - -// From the Amazon docs: -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Expires + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -func preStringToSignV2(req http.Request, virtualHost bool) string { - buf := new(bytes.Buffer) - // Write standard headers. - writePreSignV2Headers(buf, req) - // Write canonicalized protocol headers if any. - writeCanonicalizedHeaders(buf, req) - // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req, virtualHost) - return buf.String() -} - -// writePreSignV2Headers - write preSign v2 required headers. -func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { - buf.WriteString(req.Method + "\n") - buf.WriteString(req.Header.Get("Content-Md5") + "\n") - buf.WriteString(req.Header.Get("Content-Type") + "\n") - buf.WriteString(req.Header.Get("Expires") + "\n") -} - -// From the Amazon docs: -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -func stringToSignV2(req http.Request, virtualHost bool) string { - buf := new(bytes.Buffer) - // Write standard headers. - writeSignV2Headers(buf, req) - // Write canonicalized protocol headers if any. - writeCanonicalizedHeaders(buf, req) - // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req, virtualHost) - return buf.String() -} - -// writeSignV2Headers - write signV2 required headers. -func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { - buf.WriteString(req.Method + "\n") - buf.WriteString(req.Header.Get("Content-Md5") + "\n") - buf.WriteString(req.Header.Get("Content-Type") + "\n") - buf.WriteString(req.Header.Get("Date") + "\n") -} - -// writeCanonicalizedHeaders - write canonicalized headers. 
-func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { - var protoHeaders []string - vals := make(map[string][]string) - for k, vv := range req.Header { - // All the AMZ headers should be lowercase - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-amz") { - protoHeaders = append(protoHeaders, lk) - vals[lk] = vv - } - } - sort.Strings(protoHeaders) - for _, k := range protoHeaders { - buf.WriteString(k) - buf.WriteByte(':') - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - if strings.Contains(v, "\n") { - // TODO: "Unfold" long headers that - // span multiple lines (as allowed by - // RFC 2616, section 4.2) by replacing - // the folding white-space (including - // new-line) by a single space. - buf.WriteString(v) - } else { - buf.WriteString(v) - } - } - buf.WriteByte('\n') - } -} - -// AWS S3 Signature V2 calculation rule is give here: -// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign - -// Whitelist resource list that will be used in query string for signature-V2 calculation. -// The list should be alphabetically sorted -var resourceList = []string{ - "acl", - "delete", - "lifecycle", - "location", - "logging", - "notification", - "partNumber", - "policy", - "replication", - "requestPayment", - "response-cache-control", - "response-content-disposition", - "response-content-encoding", - "response-content-language", - "response-content-type", - "response-expires", - "torrent", - "uploadId", - "uploads", - "versionId", - "versioning", - "versions", - "website", -} - -// From the Amazon docs: -// -// CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { - // Save request URL. - requestURL := req.URL - // Get encoded URL path. 
- buf.WriteString(encodeURL2Path(&req, virtualHost)) - if requestURL.RawQuery != "" { - var n int - vals, _ := url.ParseQuery(requestURL.RawQuery) - // Verify if any sub resource queries are present, if yes - // canonicallize them. - for _, resource := range resourceList { - if vv, ok := vals[resource]; ok && len(vv) > 0 { - n++ - // First element - switch n { - case 1: - buf.WriteByte('?') - // The rest - default: - buf.WriteByte('&') - } - buf.WriteString(resource) - // Request parameters - if len(vv[0]) > 0 { - buf.WriteByte('=') - buf.WriteString(vv[0]) - } - } - } - } -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go deleted file mode 100644 index 67572b20..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/request-signature-v4.go +++ /dev/null @@ -1,318 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package signer - -import ( - "bytes" - "encoding/hex" - "net/http" - "sort" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v7/pkg/s3utils" -) - -// Signature and API related constants. 
-const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" - yyyymmdd = "20060102" -) - -// Different service types -const ( - ServiceTypeS3 = "s3" - ServiceTypeSTS = "sts" -) - -/// -/// Excerpts from @lsegal - -/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes -/// problems with generating pre-signed URLs (that are executed -/// by other agents) or when customers pass requests through -/// proxies, which may modify the user-agent. -/// -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// -var v4IgnoredHeaders = map[string]bool{ - "Authorization": true, - "User-Agent": true, -} - -// getSigningKey hmac seed to calculate final signature. -func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte { - date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) - location := sumHMAC(date, []byte(loc)) - service := sumHMAC(location, []byte(serviceType)) - signingKey := sumHMAC(service, []byte("aws4_request")) - return signingKey -} - -// getSignature final signature in hexadecimal form. -func getSignature(signingKey []byte, stringToSign string) string { - return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) -} - -// getScope generate a string of a specific date, an AWS region, and a -// service. -func getScope(location string, t time.Time, serviceType string) string { - scope := strings.Join([]string{ - t.Format(yyyymmdd), - location, - serviceType, - "aws4_request", - }, "/") - return scope -} - -// GetCredential generate a credential string. -func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string { - scope := getScope(location, t, serviceType) - return accessKeyID + "/" + scope -} - -// getHashedPayload get the hexadecimal value of the SHA256 hash of -// the request payload. 
-func getHashedPayload(req http.Request) string { - hashedPayload := req.Header.Get("X-Amz-Content-Sha256") - if hashedPayload == "" { - // Presign does not have a payload, use S3 recommended value. - hashedPayload = unsignedPayload - } - return hashedPayload -} - -// getCanonicalHeaders generate a list of request headers for -// signature. -func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { - var headers []string - vals := make(map[string][]string) - for k, vv := range req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - vals[strings.ToLower(k)] = vv - } - headers = append(headers, "host") - sort.Strings(headers) - - var buf bytes.Buffer - // Save all the headers in canonical form
: newline - // separated for each header. - for _, k := range headers { - buf.WriteString(k) - buf.WriteByte(':') - switch { - case k == "host": - buf.WriteString(getHostAddr(&req)) - fallthrough - default: - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - buf.WriteString(signV4TrimAll(v)) - } - buf.WriteByte('\n') - } - } - return buf.String() -} - -// getSignedHeaders generate all signed request headers. -// i.e lexically sorted, semicolon-separated list of lowercase -// request header names. -func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { - var headers []string - for k := range req.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // Ignored header found continue. - } - headers = append(headers, strings.ToLower(k)) - } - headers = append(headers, "host") - sort.Strings(headers) - return strings.Join(headers, ";") -} - -// getCanonicalRequest generate a canonical request of style. -// -// canonicalRequest = -// \n -// \n -// \n -// \n -// \n -// -func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string { - req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) - canonicalRequest := strings.Join([]string{ - req.Method, - s3utils.EncodePath(req.URL.Path), - req.URL.RawQuery, - getCanonicalHeaders(req, ignoredHeaders), - getSignedHeaders(req, ignoredHeaders), - hashedPayload, - }, "\n") - return canonicalRequest -} - -// getStringToSign a string based on selected query values. 
-func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string { - stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n" - stringToSign = stringToSign + getScope(location, t, serviceType) + "\n" - stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest))) - return stringToSign -} - -// PreSignV4 presign the request, in accordance with -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. -func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request { - // Presign is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - // Initial time. - t := time.Now().UTC() - - // Get credential string. - credential := GetCredential(accessKeyID, location, t, ServiceTypeS3) - - // Get all signed headers. - signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) - - // Set URL query. - query := req.URL.Query() - query.Set("X-Amz-Algorithm", signV4Algorithm) - query.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10)) - query.Set("X-Amz-SignedHeaders", signedHeaders) - query.Set("X-Amz-Credential", credential) - // Set session token if available. - if sessionToken != "" { - query.Set("X-Amz-Security-Token", sessionToken) - } - req.URL.RawQuery = query.Encode() - - // Get canonical request. - canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req)) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3) - - // Gext hmac signing key. - signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) - - // Calculate signature. - signature := getSignature(signingKey, stringToSign) - - // Add signature header to RawQuery. 
- req.URL.RawQuery += "&X-Amz-Signature=" + signature - - return &req -} - -// PostPresignSignatureV4 - presigned signature for PostPolicy -// requests. -func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { - // Get signining key. - signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3) - // Calculate signature. - signature := getSignature(signingkey, policyBase64) - return signature -} - -// SignV4STS - signature v4 for STS request. -func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS) -} - -// Internal function called for different service types. -func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request { - // Signature calculation is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - // Initial time. - t := time.Now().UTC() - - // Set x-amz-date. - req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) - - // Set session token if available. - if sessionToken != "" { - req.Header.Set("X-Amz-Security-Token", sessionToken) - } - - hashedPayload := getHashedPayload(req) - if serviceType == ServiceTypeSTS { - // Content sha256 header is not sent with the request - // but it is expected to have sha256 of payload for signature - // in STS service type request. - req.Header.Del("X-Amz-Content-Sha256") - } - - // Get canonical request. - canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType) - - // Get hmac signing key. - signingKey := getSigningKey(secretAccessKey, location, t, serviceType) - - // Get credential string. 
- credential := GetCredential(accessKeyID, location, t, serviceType) - - // Get all signed headers. - signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) - - // Calculate signature. - signature := getSignature(signingKey, stringToSign) - - // If regular request, construct the final authorization header. - parts := []string{ - signV4Algorithm + " Credential=" + credential, - "SignedHeaders=" + signedHeaders, - "Signature=" + signature, - } - - // Set authorization header. - auth := strings.Join(parts, ", ") - req.Header.Set("Authorization", auth) - - return &req -} - -// SignV4 sign the request before Do(), in accordance with -// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. -func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request { - return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go deleted file mode 100644 index 2192a369..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/signer/utils.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package signer - -import ( - "crypto/hmac" - "net/http" - "strings" - - "github.com/minio/sha256-simd" -) - -// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// sum256 calculate sha256 sum for an input byte array. -func sum256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -// sumHMAC calculate hmac between two input byte array. -func sumHMAC(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -// getHostAddr returns host header if available, otherwise returns host from URL -func getHostAddr(req *http.Request) string { - if req.Host != "" { - return req.Host - } - return req.URL.Host -} - -// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() -// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -func signV4TrimAll(input string) string { - // Compress adjacent spaces (a space is determined by - // unicode.IsSpace() internally here) to one space and return - return strings.Join(strings.Fields(input), " ") -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go deleted file mode 100644 index b5fb9565..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sse - -import "encoding/xml" - -// ApplySSEByDefault defines default encryption configuration, KMS or SSE. To activate -// KMS, SSEAlgoritm needs to be set to "aws:kms" -// Minio currently does not support Kms. -type ApplySSEByDefault struct { - KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"` - SSEAlgorithm string `xml:"SSEAlgorithm"` -} - -// Rule layer encapsulates default encryption configuration -type Rule struct { - Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"` -} - -// Configuration is the default encryption configuration structure -type Configuration struct { - XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` - Rules []Rule `xml:"Rule"` -} - -// NewConfigurationSSES3 initializes a new SSE-S3 configuration -func NewConfigurationSSES3() *Configuration { - return &Configuration{ - Rules: []Rule{ - { - Apply: ApplySSEByDefault{ - SSEAlgorithm: "AES256", - }, - }, - }, - } -} - -// NewConfigurationSSEKMS initializes a new SSE-KMS configuration -func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration { - return &Configuration{ - Rules: []Rule{ - { - Apply: ApplySSEByDefault{ - KmsMasterKeyID: kmsMasterKey, - SSEAlgorithm: "aws:kms", - }, - }, - }, - } -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/mantle/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go deleted file mode 100644 index d7c65af5..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package tags - -import ( - "encoding/xml" - "io" - "net/url" - "strings" - "unicode/utf8" -) - -// Error contains tag specific error. -type Error interface { - error - Code() string -} - -type errTag struct { - code string - message string -} - -// Code contains error code. -func (err errTag) Code() string { - return err.code -} - -// Error contains error message. -func (err errTag) Error() string { - return err.message -} - -var ( - errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"} - errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"} - errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"} - errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"} - errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"} -) - -// Tag comes with limitation as per -// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html amd -// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions -const ( - maxKeyLength = 128 - maxValueLength = 256 - maxObjectTagCount = 10 - maxTagCount = 50 -) - -func checkKey(key string) error { - if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") { - return errInvalidTagKey - } - - return nil -} - -func checkValue(value string) error { - if utf8.RuneCountInString(value) > 
maxValueLength || strings.Contains(value, "&") { - return errInvalidTagValue - } - - return nil -} - -// Tag denotes key and value. -type Tag struct { - Key string `xml:"Key"` - Value string `xml:"Value"` -} - -func (tag Tag) String() string { - return tag.Key + "=" + tag.Value -} - -// IsEmpty returns whether this tag is empty or not. -func (tag Tag) IsEmpty() bool { - return tag.Key == "" -} - -// Validate checks this tag. -func (tag Tag) Validate() error { - if err := checkKey(tag.Key); err != nil { - return err - } - - return checkValue(tag.Value) -} - -// MarshalXML encodes to XML data. -func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := tag.Validate(); err != nil { - return err - } - - type subTag Tag // to avoid recursively calling MarshalXML() - return e.EncodeElement(subTag(tag), start) -} - -// UnmarshalXML decodes XML data to tag. -func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - type subTag Tag // to avoid recursively calling UnmarshalXML() - var st subTag - if err := d.DecodeElement(&st, &start); err != nil { - return err - } - - if err := Tag(st).Validate(); err != nil { - return err - } - - *tag = Tag(st) - return nil -} - -// tagSet represents list of unique tags. 
-type tagSet struct { - tagMap map[string]string - isObject bool -} - -func (tags tagSet) String() string { - vals := make(url.Values) - for key, value := range tags.tagMap { - vals.Set(key, value) - } - return vals.Encode() -} - -func (tags *tagSet) remove(key string) { - delete(tags.tagMap, key) -} - -func (tags *tagSet) set(key, value string, failOnExist bool) error { - if failOnExist { - if _, found := tags.tagMap[key]; found { - return errDuplicateTagKey - } - } - - if err := checkKey(key); err != nil { - return err - } - - if err := checkValue(value); err != nil { - return err - } - - if tags.isObject { - if len(tags.tagMap) == maxObjectTagCount { - return errTooManyObjectTags - } - } else if len(tags.tagMap) == maxTagCount { - return errTooManyTags - } - - tags.tagMap[key] = value - return nil -} - -func (tags tagSet) toMap() map[string]string { - m := make(map[string]string) - for key, value := range tags.tagMap { - m[key] = value - } - return m -} - -// MarshalXML encodes to XML data. -func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - tagList := struct { - Tags []Tag `xml:"Tag"` - }{} - - for key, value := range tags.tagMap { - tagList.Tags = append(tagList.Tags, Tag{key, value}) - } - - return e.EncodeElement(tagList, start) -} - -// UnmarshalXML decodes XML data to tag list. 
-func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - tagList := struct { - Tags []Tag `xml:"Tag"` - }{} - - if err := d.DecodeElement(&tagList, &start); err != nil { - return err - } - - if tags.isObject { - if len(tagList.Tags) > maxObjectTagCount { - return errTooManyObjectTags - } - } else if len(tagList.Tags) > maxTagCount { - return errTooManyTags - } - - m := map[string]string{} - for _, tag := range tagList.Tags { - if _, found := m[tag.Key]; found { - return errDuplicateTagKey - } - - m[tag.Key] = tag.Value - } - - tags.tagMap = m - return nil -} - -type tagging struct { - XMLName xml.Name `xml:"Tagging"` - TagSet *tagSet `xml:"TagSet"` -} - -// Tags is list of tags of XML request/response as per -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody -type Tags tagging - -func (tags Tags) String() string { - return tags.TagSet.String() -} - -// Remove removes a tag by its key. -func (tags *Tags) Remove(key string) { - tags.TagSet.remove(key) -} - -// Set sets new tag. -func (tags *Tags) Set(key, value string) error { - return tags.TagSet.set(key, value, false) -} - -// ToMap returns copy of tags. -func (tags Tags) ToMap() map[string]string { - return tags.TagSet.toMap() -} - -// MapToObjectTags converts an input map of key and value into -// *Tags data structure with validation. -func MapToObjectTags(tagMap map[string]string) (*Tags, error) { - return NewTags(tagMap, true) -} - -// MapToBucketTags converts an input map of key and value into -// *Tags data structure with validation. -func MapToBucketTags(tagMap map[string]string) (*Tags, error) { - return NewTags(tagMap, false) -} - -// NewTags creates Tags from tagMap, If isObject is set, it validates for object tags. 
-func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) { - tagging := &Tags{ - TagSet: &tagSet{ - tagMap: make(map[string]string), - isObject: isObject, - }, - } - - for key, value := range tagMap { - if err := tagging.TagSet.set(key, value, true); err != nil { - return nil, err - } - } - - return tagging, nil -} - -func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) { - tagging := &Tags{ - TagSet: &tagSet{ - tagMap: make(map[string]string), - isObject: isObject, - }, - } - - if err := xml.NewDecoder(reader).Decode(tagging); err != nil { - return nil, err - } - - return tagging, nil -} - -// ParseBucketXML decodes XML data of tags in reader specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax. -func ParseBucketXML(reader io.Reader) (*Tags, error) { - return unmarshalXML(reader, false) -} - -// ParseObjectXML decodes XML data of tags in reader specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax -func ParseObjectXML(reader io.Reader) (*Tags, error) { - return unmarshalXML(reader, true) -} - -// Parse decodes HTTP query formatted string into tags which is limited by isObject. -// A query formatted string is like "key1=value1&key2=value2". -func Parse(s string, isObject bool) (*Tags, error) { - values, err := url.ParseQuery(s) - if err != nil { - return nil, err - } - - tagging := &Tags{ - TagSet: &tagSet{ - tagMap: make(map[string]string), - isObject: isObject, - }, - } - - for key := range values { - if err := tagging.TagSet.set(key, values.Get(key), true); err != nil { - return nil, err - } - } - - return tagging, nil -} - -// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2". 
-func ParseObjectTags(s string) (*Tags, error) { - return Parse(s, true) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/post-policy.go b/mantle/vendor/github.com/minio/minio-go/v7/post-policy.go deleted file mode 100644 index 31a7308c..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/post-policy.go +++ /dev/null @@ -1,327 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/base64" - "fmt" - "strings" - "time" -) - -// expirationDateFormat date format for expiration key in json policy. -const expirationDateFormat = "2006-01-02T15:04:05.999Z" - -// policyCondition explanation: -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html -// -// Example: -// -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -// -type policyCondition struct { - matchType string - condition string - value string -} - -// PostPolicy - Provides strict static type conversion and validation -// for Amazon S3's POST policy JSON string. -type PostPolicy struct { - // Expiration date and time of the POST policy. - expiration time.Time - // Collection of different policy conditions. - conditions []policyCondition - // ContentLengthRange minimum and maximum allowable size for the - // uploaded content. 
- contentLengthRange struct { - min int64 - max int64 - } - - // Post form data. - formData map[string]string -} - -// NewPostPolicy - Instantiate new post policy. -func NewPostPolicy() *PostPolicy { - p := &PostPolicy{} - p.conditions = make([]policyCondition, 0) - p.formData = make(map[string]string) - return p -} - -// SetExpires - Sets expiration time for the new policy. -func (p *PostPolicy) SetExpires(t time.Time) error { - if t.IsZero() { - return errInvalidArgument("No expiry time set.") - } - p.expiration = t - return nil -} - -// SetKey - Sets an object name for the policy based upload. -func (p *PostPolicy) SetKey(key string) error { - if strings.TrimSpace(key) == "" || key == "" { - return errInvalidArgument("Object name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$key", - value: key, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = key - return nil -} - -// SetKeyStartsWith - Sets an object name that an policy based upload -// can start with. -func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return errInvalidArgument("Object prefix is empty.") - } - policyCond := policyCondition{ - matchType: "starts-with", - condition: "$key", - value: keyStartsWith, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = keyStartsWith - return nil -} - -// SetBucket - Sets bucket at which objects will be uploaded to. 
-func (p *PostPolicy) SetBucket(bucketName string) error { - if strings.TrimSpace(bucketName) == "" || bucketName == "" { - return errInvalidArgument("Bucket name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$bucket", - value: bucketName, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["bucket"] = bucketName - return nil -} - -// SetCondition - Sets condition for credentials, date and algorithm -func (p *PostPolicy) SetCondition(matchType, condition, value string) error { - if strings.TrimSpace(value) == "" || value == "" { - return errInvalidArgument("No value specified for condition") - } - - policyCond := policyCondition{ - matchType: matchType, - condition: "$" + condition, - value: value, - } - if condition == "X-Amz-Credential" || condition == "X-Amz-Date" || condition == "X-Amz-Algorithm" { - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[condition] = value - return nil - } - return errInvalidArgument("Invalid condition in policy") -} - -// SetContentType - Sets content-type of the object for this policy -// based upload. -func (p *PostPolicy) SetContentType(contentType string) error { - if strings.TrimSpace(contentType) == "" || contentType == "" { - return errInvalidArgument("No content type specified.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$Content-Type", - value: contentType, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["Content-Type"] = contentType - return nil -} - -// SetContentTypeStartsWith - Sets what content-type of the object for this policy -// based upload can start with. 
-func (p *PostPolicy) SetContentTypeStartsWith(contentTypeStartsWith string) error { - if strings.TrimSpace(contentTypeStartsWith) == "" || contentTypeStartsWith == "" { - return errInvalidArgument("No content type specified.") - } - policyCond := policyCondition{ - matchType: "starts-with", - condition: "$Content-Type", - value: contentTypeStartsWith, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["Content-Type"] = contentTypeStartsWith - return nil -} - -// SetContentLengthRange - Set new min and max content length -// condition for all incoming uploads. -func (p *PostPolicy) SetContentLengthRange(min, max int64) error { - if min > max { - return errInvalidArgument("Minimum limit is larger than maximum limit.") - } - if min < 0 { - return errInvalidArgument("Minimum limit cannot be negative.") - } - if max < 0 { - return errInvalidArgument("Maximum limit cannot be negative.") - } - p.contentLengthRange.min = min - p.contentLengthRange.max = max - return nil -} - -// SetSuccessActionRedirect - Sets the redirect success url of the object for this policy -// based upload. -func (p *PostPolicy) SetSuccessActionRedirect(redirect string) error { - if strings.TrimSpace(redirect) == "" || redirect == "" { - return errInvalidArgument("Redirect is empty") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$success_action_redirect", - value: redirect, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["success_action_redirect"] = redirect - return nil -} - -// SetSuccessStatusAction - Sets the status success code of the object for this policy -// based upload. 
-func (p *PostPolicy) SetSuccessStatusAction(status string) error { - if strings.TrimSpace(status) == "" || status == "" { - return errInvalidArgument("Status is empty") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$success_action_status", - value: status, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["success_action_status"] = status - return nil -} - -// SetUserMetadata - Set user metadata as a key/value couple. -// Can be retrieved through a HEAD request or an event. -func (p *PostPolicy) SetUserMetadata(key string, value string) error { - if strings.TrimSpace(key) == "" || key == "" { - return errInvalidArgument("Key is empty") - } - if strings.TrimSpace(value) == "" || value == "" { - return errInvalidArgument("Value is empty") - } - headerName := fmt.Sprintf("x-amz-meta-%s", key) - policyCond := policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", headerName), - value: value, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[headerName] = value - return nil -} - -// SetUserData - Set user data as a key/value couple. -// Can be retrieved through a HEAD request or an event. -func (p *PostPolicy) SetUserData(key string, value string) error { - if key == "" { - return errInvalidArgument("Key is empty") - } - if value == "" { - return errInvalidArgument("Value is empty") - } - headerName := fmt.Sprintf("x-amz-%s", key) - policyCond := policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", headerName), - value: value, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[headerName] = value - return nil -} - -// addNewPolicy - internal helper to validate adding new policies. 
-func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { - if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { - return errInvalidArgument("Policy fields are empty.") - } - p.conditions = append(p.conditions, policyCond) - return nil -} - -// String function for printing policy in json formatted string. -func (p PostPolicy) String() string { - return string(p.marshalJSON()) -} - -// marshalJSON - Provides Marshaled JSON in bytes. -func (p PostPolicy) marshalJSON() []byte { - expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"` - var conditionsStr string - conditions := []string{} - for _, po := range p.conditions { - conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value)) - } - if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 { - conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]", - p.contentLengthRange.min, p.contentLengthRange.max)) - } - if len(conditions) > 0 { - conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]" - } - retStr := "{" - retStr = retStr + expirationStr + "," - retStr = retStr + conditionsStr - retStr = retStr + "}" - return []byte(retStr) -} - -// base64 - Produces base64 of PostPolicy's Marshaled json. -func (p PostPolicy) base64() string { - return base64.StdEncoding.EncodeToString(p.marshalJSON()) -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/retry-continous.go b/mantle/vendor/github.com/minio/minio-go/v7/retry-continous.go deleted file mode 100644 index 3d25883b..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/retry-continous.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import "time" - -// newRetryTimerContinous creates a timer with exponentially increasing delays forever. -func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { - attemptCh := make(chan int) - - // normalize jitter to the range [0, 1.0] - if jitter < NoJitter { - jitter = NoJitter - } - if jitter > MaxJitter { - jitter = MaxJitter - } - - // computes the exponential backoff duration according to - // https://www.awsarchitectureblog.com/2015/03/backoff.html - exponentialBackoffWait := func(attempt int) time.Duration { - // 1< maxAttempt { - attempt = maxAttempt - } - //sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1< cap { - sleep = cap - } - if jitter != NoJitter { - sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) - } - return sleep - } - - go func() { - defer close(attemptCh) - var nextBackoff int - for { - select { - // Attempts starts. - case attemptCh <- nextBackoff: - nextBackoff++ - case <-doneCh: - // Stop the routine. - return - } - time.Sleep(exponentialBackoffWait(nextBackoff)) - } - }() - return attemptCh -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/retry.go b/mantle/vendor/github.com/minio/minio-go/v7/retry.go deleted file mode 100644 index 598af297..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/retry.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net/http" - "time" -) - -// MaxRetry is the maximum number of retries before stopping. -var MaxRetry = 10 - -// MaxJitter will randomize over the full exponential backoff time -const MaxJitter = 1.0 - -// NoJitter disables the use of jitter for randomizing the exponential backoff time -const NoJitter = 0.0 - -// DefaultRetryUnit - default unit multiplicative per retry. -// defaults to 200 * time.Millisecond -var DefaultRetryUnit = 200 * time.Millisecond - -// DefaultRetryCap - Each retry attempt never waits no longer than -// this maximum time duration. -var DefaultRetryCap = time.Second - -// newRetryTimer creates a timer with exponentially increasing -// delays until the maximum retry attempts are reached. 
-func (c Client) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int { - attemptCh := make(chan int) - - // computes the exponential backoff duration according to - // https://www.awsarchitectureblog.com/2015/03/backoff.html - exponentialBackoffWait := func(attempt int) time.Duration { - // normalize jitter to the range [0, 1.0] - if jitter < NoJitter { - jitter = NoJitter - } - if jitter > MaxJitter { - jitter = MaxJitter - } - - //sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1< cap { - sleep = cap - } - if jitter != NoJitter { - sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) - } - return sleep - } - - go func() { - defer close(attemptCh) - for i := 0; i < maxRetry; i++ { - select { - case attemptCh <- i + 1: - case <-ctx.Done(): - return - } - - select { - case <-time.After(exponentialBackoffWait(i)): - case <-ctx.Done(): - return - } - } - }() - return attemptCh -} - -// List of AWS S3 error codes which are retryable. -var retryableS3Codes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "InternalError": {}, - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "SlowDown": {}, - // Add more AWS S3 codes here. -} - -// isS3CodeRetryable - is s3 error code retryable. -func isS3CodeRetryable(s3Code string) (ok bool) { - _, ok = retryableS3Codes[s3Code] - return ok -} - -// List of HTTP status codes which are retryable. -var retryableHTTPStatusCodes = map[int]struct{}{ - 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet - http.StatusInternalServerError: {}, - http.StatusBadGateway: {}, - http.StatusServiceUnavailable: {}, - http.StatusGatewayTimeout: {}, - // Add more HTTP status codes here. -} - -// isHTTPStatusRetryable - is HTTP error code retryable. 
-func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { - _, ok = retryableHTTPStatusCodes[httpStatusCode] - return ok -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/s3-endpoints.go b/mantle/vendor/github.com/minio/minio-go/v7/s3-endpoints.go deleted file mode 100644 index 9c8f02c8..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/s3-endpoints.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// awsS3EndpointMap Amazon S3 endpoint map. 
-var awsS3EndpointMap = map[string]string{ - "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", - "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", - "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", - "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", - "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", - "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", - "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", - "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", - "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", - "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", - "eu-south-1": "s3.dualstack.eu-south-1.amazonaws.com", - "ap-east-1": "s3.dualstack.ap-east-1.amazonaws.com", - "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", - "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", - "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", - "ap-northeast-3": "s3.dualstack.ap-northeast-3.amazonaws.com", - "af-south-1": "s3.dualstack.af-south-1.amazonaws.com", - "me-south-1": "s3.dualstack.me-south-1.amazonaws.com", - "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", - "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", - "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", - "cn-north-1": "s3.dualstack.cn-north-1.amazonaws.com.cn", - "cn-northwest-1": "s3.dualstack.cn-northwest-1.amazonaws.com.cn", -} - -// getS3Endpoint get Amazon S3 endpoint based on the bucket location. -func getS3Endpoint(bucketLocation string) (s3Endpoint string) { - s3Endpoint, ok := awsS3EndpointMap[bucketLocation] - if !ok { - // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. 
- s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" - } - return s3Endpoint -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/s3-error.go b/mantle/vendor/github.com/minio/minio-go/v7/s3-error.go deleted file mode 100644 index f365157e..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/s3-error.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -// Non exhaustive list of AWS S3 standard error responses - -// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -var s3ErrorResponseMap = map[string]string{ - "AccessDenied": "Access Denied.", - "BadDigest": "The Content-Md5 you specified did not match what we received.", - "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", - "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", - "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", - "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", - "InvalidBucketName": "The specified bucket is not valid.", - "InvalidDigest": "The Content-Md5 you specified is not valid.", - "InvalidRange": "The requested range is not satisfiable", - "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", - "MissingContentLength": "You must provide the Content-Length HTTP header.", - "MissingContentMD5": "Missing required header for this request: Content-Md5.", - "MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The specified bucket does not exist.", - "NoSuchBucketPolicy": "The bucket policy does not exist", - "NoSuchKey": "The specified key does not exist.", - "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - "NotImplemented": "A header you provided implies functionality that is not implemented", - "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", - "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", - "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. 
Check your key and signing method.", - "MethodNotAllowed": "The specified method is not allowed against this resource.", - "InvalidPart": "One or more of the specified parts could not be found.", - "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", - "InvalidObjectState": "The operation is not valid for the current state of the object.", - "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", - "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", - "BucketNotEmpty": "The bucket you tried to delete is not empty", - "AllAccessDisabled": "All access to this bucket has been disabled.", - "MalformedPolicy": "Policy has invalid resource.", - "MissingFields": "Missing fields in request.", - "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", - "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", - "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", - "InvalidDuration": "Duration provided in the request is invalid.", - "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", - // Add new API errors here. -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/transport.go b/mantle/vendor/github.com/minio/minio-go/v7/transport.go deleted file mode 100644 index d5ad15b8..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/transport.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build go1.7 go1.8 - -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - "net" - "net/http" - "os" - "time" -) - -// mustGetSystemCertPool - return system CAs or empty pool in case of error (or windows) -func mustGetSystemCertPool() *x509.CertPool { - pool, err := x509.SystemCertPool() - if err != nil { - return x509.NewCertPool() - } - return pool -} - -// DefaultTransport - this default transport is similar to -// http.DefaultTransport but with additional param DisableCompression -// is set to true to avoid decompressing content with 'gzip' encoding. -var DefaultTransport = func(secure bool) (*http.Transport, error) { - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - MaxIdleConns: 256, - MaxIdleConnsPerHost: 16, - ResponseHeaderTimeout: time.Minute, - IdleConnTimeout: time.Minute, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 10 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. 
- // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, - } - - if secure { - tr.TLSClientConfig = &tls.Config{ - // Can't use SSLv3 because of POODLE and BEAST - // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher - // Can't use TLSv1.1 because of RC4 cipher usage - MinVersion: tls.VersionTLS12, - } - if f := os.Getenv("SSL_CERT_FILE"); f != "" { - rootCAs := mustGetSystemCertPool() - data, err := ioutil.ReadFile(f) - if err == nil { - rootCAs.AppendCertsFromPEM(data) - } - tr.TLSClientConfig.RootCAs = rootCAs - } - } - return tr, nil -} diff --git a/mantle/vendor/github.com/minio/minio-go/v7/utils.go b/mantle/vendor/github.com/minio/minio-go/v7/utils.go deleted file mode 100644 index 4bdf1a3c..00000000 --- a/mantle/vendor/github.com/minio/minio-go/v7/utils.go +++ /dev/null @@ -1,488 +0,0 @@ -/* - * MinIO Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package minio - -import ( - "crypto/md5" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "fmt" - "hash" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" - "sync" - "time" - - md5simd "github.com/minio/md5-simd" - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/sha256-simd" -) - -func trimEtag(etag string) string { - etag = strings.TrimPrefix(etag, "\"") - return strings.TrimSuffix(etag, "\"") -} - -var expirationRegex = regexp.MustCompile(`expiry-date="(.*?)", rule-id="(.*?)"`) - -func amzExpirationToExpiryDateRuleID(expiration string) (time.Time, string) { - if matches := expirationRegex.FindStringSubmatch(expiration); len(matches) == 3 { - expTime, err := time.Parse(http.TimeFormat, matches[1]) - if err != nil { - return time.Time{}, "" - } - return expTime, matches[2] - } - return time.Time{}, "" -} - -// xmlDecoder provide decoded value in xml. -func xmlDecoder(body io.Reader, v interface{}) error { - d := xml.NewDecoder(body) - return d.Decode(v) -} - -// sum256 calculate sha256sum for an input byte array, returns hex encoded. -func sum256Hex(data []byte) string { - hash := newSHA256Hasher() - defer hash.Close() - hash.Write(data) - return hex.EncodeToString(hash.Sum(nil)) -} - -// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. -func sumMD5Base64(data []byte) string { - hash := newMd5Hasher() - defer hash.Close() - hash.Write(data) - return base64.StdEncoding.EncodeToString(hash.Sum(nil)) -} - -// getEndpointURL - construct a new endpoint. -func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { - if strings.Contains(endpoint, ":") { - host, _, err := net.SplitHostPort(endpoint) - if err != nil { - return nil, err - } - if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." 
- return nil, errInvalidArgument(msg) - } - } else { - if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, errInvalidArgument(msg) - } - } - // If secure is false, use 'http' scheme. - scheme := "https" - if !secure { - scheme = "http" - } - - // Construct a secured endpoint URL. - endpointURLStr := scheme + "://" + endpoint - endpointURL, err := url.Parse(endpointURLStr) - if err != nil { - return nil, err - } - - // Validate incoming endpoint URL. - if err := isValidEndpointURL(*endpointURL); err != nil { - return nil, err - } - return endpointURL, nil -} - -// closeResponse close non nil response with any response Body. -// convenient wrapper to drain any remaining data on response body. -// -// Subsequently this allows golang http RoundTripper -// to re-use the same connection for future requests. -func closeResponse(resp *http.Response) { - // Callers should close resp.Body when done reading from it. - // If resp.Body is not closed, the Client's underlying RoundTripper - // (typically Transport) may not be able to re-use a persistent TCP - // connection to the server for a subsequent "keep-alive" request. - if resp != nil && resp.Body != nil { - // Drain any remaining Body and then close the connection. - // Without this closing connection would disallow re-using - // the same connection for future uses. - // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } -} - -var ( - // Hex encoded string of nil sha256sum bytes. - emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - - // Sentinel URL is the default url value which is invalid. - sentinelURL = url.URL{} -) - -// Verify if input endpoint URL is valid. 
-func isValidEndpointURL(endpointURL url.URL) error { - if endpointURL == sentinelURL { - return errInvalidArgument("Endpoint url cannot be empty.") - } - if endpointURL.Path != "/" && endpointURL.Path != "" { - return errInvalidArgument("Endpoint url cannot have fully qualified paths.") - } - if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") { - if !s3utils.IsAmazonEndpoint(endpointURL) { - return errInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.") - } - } - if strings.Contains(endpointURL.Host, ".googleapis.com") { - if !s3utils.IsGoogleEndpoint(endpointURL) { - return errInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.") - } - } - return nil -} - -// Verify if input expires value is valid. -func isValidExpiry(expires time.Duration) error { - expireSeconds := int64(expires / time.Second) - if expireSeconds < 1 { - return errInvalidArgument("Expires cannot be lesser than 1 second.") - } - if expireSeconds > 604800 { - return errInvalidArgument("Expires cannot be greater than 7 days.") - } - return nil -} - -// Extract only necessary metadata header key/values by -// filtering them out with a list of custom header keys. -func extractObjMetadata(header http.Header) http.Header { - preserveKeys := []string{ - "Content-Type", - "Cache-Control", - "Content-Encoding", - "Content-Language", - "Content-Disposition", - "X-Amz-Storage-Class", - "X-Amz-Object-Lock-Mode", - "X-Amz-Object-Lock-Retain-Until-Date", - "X-Amz-Object-Lock-Legal-Hold", - "X-Amz-Website-Redirect-Location", - "X-Amz-Server-Side-Encryption", - "X-Amz-Tagging-Count", - "X-Amz-Meta-", - // Add new headers to be preserved. - // if you add new headers here, please extend - // PutObjectOptions{} to preserve them - // upon upload as well. 
- } - filteredHeader := make(http.Header) - for k, v := range header { - var found bool - for _, prefix := range preserveKeys { - if !strings.HasPrefix(k, prefix) { - continue - } - found = true - break - } - if found { - filteredHeader[k] = v - } - } - return filteredHeader -} - -// ToObjectInfo converts http header values into ObjectInfo type, -// extracts metadata and fills in all the necessary fields in ObjectInfo. -func ToObjectInfo(bucketName string, objectName string, h http.Header) (ObjectInfo, error) { - var err error - // Trim off the odd double quotes from ETag in the beginning and end. - etag := trimEtag(h.Get("ETag")) - - // Parse content length is exists - var size int64 = -1 - contentLengthStr := h.Get("Content-Length") - if contentLengthStr != "" { - size, err = strconv.ParseInt(contentLengthStr, 10, 64) - if err != nil { - // Content-Length is not valid - return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: fmt.Sprintf("Content-Length is not an integer, failed with %v", err), - BucketName: bucketName, - Key: objectName, - RequestID: h.Get("x-amz-request-id"), - HostID: h.Get("x-amz-id-2"), - Region: h.Get("x-amz-bucket-region"), - } - } - } - - // Parse Last-Modified has http time format. - date, err := time.Parse(http.TimeFormat, h.Get("Last-Modified")) - if err != nil { - return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: fmt.Sprintf("Last-Modified time format is invalid, failed with %v", err), - BucketName: bucketName, - Key: objectName, - RequestID: h.Get("x-amz-request-id"), - HostID: h.Get("x-amz-id-2"), - Region: h.Get("x-amz-bucket-region"), - } - } - - // Fetch content type if any present. 
- contentType := strings.TrimSpace(h.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - - expiryStr := h.Get("Expires") - var expiry time.Time - if expiryStr != "" { - expiry, _ = time.Parse(http.TimeFormat, expiryStr) - } - - metadata := extractObjMetadata(h) - userMetadata := make(map[string]string) - for k, v := range metadata { - if strings.HasPrefix(k, "X-Amz-Meta-") { - userMetadata[strings.TrimPrefix(k, "X-Amz-Meta-")] = v[0] - } - } - userTags := s3utils.TagDecode(h.Get(amzTaggingHeader)) - - var tagCount int - if count := h.Get(amzTaggingCount); count != "" { - tagCount, err = strconv.Atoi(count) - if err != nil { - return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: fmt.Sprintf("x-amz-tagging-count is not an integer, failed with %v", err), - BucketName: bucketName, - Key: objectName, - RequestID: h.Get("x-amz-request-id"), - HostID: h.Get("x-amz-id-2"), - Region: h.Get("x-amz-bucket-region"), - } - } - } - - // extract lifecycle expiry date and rule ID - expTime, ruleID := amzExpirationToExpiryDateRuleID(h.Get(amzExpiration)) - - deleteMarker := h.Get(amzDeleteMarker) == "true" - - // Save object metadata info. - return ObjectInfo{ - ETag: etag, - Key: objectName, - Size: size, - LastModified: date, - ContentType: contentType, - Expires: expiry, - VersionID: h.Get(amzVersionID), - IsDeleteMarker: deleteMarker, - ReplicationStatus: h.Get(amzReplicationStatus), - Expiration: expTime, - ExpirationRuleID: ruleID, - // Extract only the relevant header keys describing the object. - // following function filters out a list of standard set of keys - // which are not part of object metadata. - Metadata: metadata, - UserMetadata: userMetadata, - UserTags: userTags, - UserTagCount: tagCount, - }, nil -} - -var readFull = func(r io.Reader, buf []byte) (n int, err error) { - // ReadFull reads exactly len(buf) bytes from r into buf. 
- // It returns the number of bytes copied and an error if - // fewer bytes were read. The error is EOF only if no bytes - // were read. If an EOF happens after reading some but not - // all the bytes, ReadFull returns ErrUnexpectedEOF. - // On return, n == len(buf) if and only if err == nil. - // If r returns an error having read at least len(buf) bytes, - // the error is dropped. - for n < len(buf) && err == nil { - var nn int - nn, err = r.Read(buf[n:]) - // Some spurious io.Reader's return - // io.ErrUnexpectedEOF when nn == 0 - // this behavior is undocumented - // so we are on purpose not using io.ReadFull - // implementation because this can lead - // to custom handling, to avoid that - // we simply modify the original io.ReadFull - // implementation to avoid this issue. - // io.ErrUnexpectedEOF with nn == 0 really - // means that io.EOF - if err == io.ErrUnexpectedEOF && nn == 0 { - err = io.EOF - } - n += nn - } - if n >= len(buf) { - err = nil - } else if n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// regCred matches credential string in HTTP header -var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") - -// regCred matches signature string in HTTP header -var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") - -// Redact out signature value from authorization string. -func redactSignature(origAuth string) string { - if !strings.HasPrefix(origAuth, signV4Algorithm) { - // Set a temporary redacted auth - return "AWS **REDACTED**:**REDACTED**" - } - - /// Signature V4 authorization header. 
- - // Strip out accessKeyID from: - // Credential=////aws4_request - newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") - - // Strip out 256-bit signature from: Signature=<256-bit signature> - return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") -} - -// Get default location returns the location based on the input -// URL `u`, if region override is provided then all location -// defaults to regionOverride. -// -// If no other cases match then the location is set to `us-east-1` -// as a last resort. -func getDefaultLocation(u url.URL, regionOverride string) (location string) { - if regionOverride != "" { - return regionOverride - } - region := s3utils.GetRegionFromURL(u) - if region == "" { - region = "us-east-1" - } - return region -} - -var supportedHeaders = []string{ - "content-type", - "cache-control", - "content-encoding", - "content-disposition", - "content-language", - "x-amz-website-redirect-location", - "x-amz-object-lock-mode", - "x-amz-metadata-directive", - "x-amz-object-lock-retain-until-date", - "expires", - "x-amz-replication-status", - // Add more supported headers here. 
-} - -// isStorageClassHeader returns true if the header is a supported storage class header -func isStorageClassHeader(headerKey string) bool { - return strings.EqualFold(amzStorageClass, headerKey) -} - -// isStandardHeader returns true if header is a supported header and not a custom header -func isStandardHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, header := range supportedHeaders { - if strings.ToLower(header) == key { - return true - } - } - return false -} - -// sseHeaders is list of server side encryption headers -var sseHeaders = []string{ - "x-amz-server-side-encryption", - "x-amz-server-side-encryption-aws-kms-key-id", - "x-amz-server-side-encryption-context", - "x-amz-server-side-encryption-customer-algorithm", - "x-amz-server-side-encryption-customer-key", - "x-amz-server-side-encryption-customer-key-MD5", -} - -// isSSEHeader returns true if header is a server side encryption header. -func isSSEHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, h := range sseHeaders { - if strings.ToLower(h) == key { - return true - } - } - return false -} - -// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. -func isAmzHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - - return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) -} - -var md5Pool = sync.Pool{New: func() interface{} { return md5.New() }} -var sha256Pool = sync.Pool{New: func() interface{} { return sha256.New() }} - -func newMd5Hasher() md5simd.Hasher { - return hashWrapper{Hash: md5Pool.New().(hash.Hash), isMD5: true} -} - -func newSHA256Hasher() md5simd.Hasher { - return hashWrapper{Hash: sha256Pool.New().(hash.Hash), isSHA256: true} -} - -// hashWrapper implements the md5simd.Hasher interface. -type hashWrapper struct { - hash.Hash - isMD5 bool - isSHA256 bool -} - -// Close will put the hasher back into the pool. 
-func (m hashWrapper) Close() { - if m.isMD5 && m.Hash != nil { - m.Reset() - md5Pool.Put(m.Hash) - } - if m.isSHA256 && m.Hash != nil { - m.Reset() - sha256Pool.Put(m.Hash) - } - m.Hash = nil -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/.gitignore b/mantle/vendor/github.com/minio/sha256-simd/.gitignore deleted file mode 100644 index c56069fe..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.test \ No newline at end of file diff --git a/mantle/vendor/github.com/minio/sha256-simd/.travis.yml b/mantle/vendor/github.com/minio/sha256-simd/.travis.yml deleted file mode 100644 index 4f85db53..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -sudo: required -dist: trusty -language: go - -os: -- linux - -go: -- tip -- 1.12.x - -env: -- ARCH=x86_64 -- ARCH=i686 - -matrix: - fast_finish: true - allow_failures: - - go: tip - -script: -- diff -au <(gofmt -d .) <(printf "") -- go test -race -v ./... -- go vet -asmdecl . -- ./test-architectures.sh diff --git a/mantle/vendor/github.com/minio/sha256-simd/README.md b/mantle/vendor/github.com/minio/sha256-simd/README.md deleted file mode 100644 index 5282d83a..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/README.md +++ /dev/null @@ -1,133 +0,0 @@ -# sha256-simd - -Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions and AVX2 for Intel and ARM64 for ARM. On AVX512 it provides an up to 8x improvement (over 3 GB/s per core) in comparison to AVX2. SHA Extensions give a performance boost of close to 4x over AVX2. - -## Introduction - -This package is designed as a replacement for `crypto/sha256`. For Intel CPUs it has two flavors for AVX512 and AVX2 (AVX/SSE are also supported). For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement. - -This package uses Golang assembly. 
The AVX512 version is based on the Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al. - -## New: Support for Intel SHA Extensions - -Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)). - -``` -$ benchcmp avx2.txt sha-ext.txt -benchmark AVX2 MB/s SHA Ext MB/s speedup -BenchmarkHash5M 514.40 1975.17 3.84x -``` - -Thanks to Kristofer Peterson, we also added additional performance changes such as optimized padding, endian conversions which sped up all implementations i.e. Intel SHA alone while doubled performance for small sizes, the other changes increased everything roughly 50%. - -## Support for AVX512 - -We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU): - -``` -$ benchcmp avx2.txt avx512.txt -benchmark AVX2 MB/s AVX512 MB/s speedup -BenchmarkHash5M 448.62 3498.20 7.80x -``` - -The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide). 
- -Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message. - -Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice. - -Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion: - -```go -import "github.com/minio/sha256-simd" - -func main() { - server := sha256.NewAvx512Server() - h512 := sha256.NewAvx512(server) - h512.Write(fileBlock) - digest := h512.Sum([]byte{}) -} -``` - -Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance: -* Have many go routines doing SHA256 calculations in parallel. -* Try to Write() messages in multiples of 64 bytes. 
-* Try to keep the overall length of messages to a roughly similar size ie. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible). - -More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores. - -## Drop-In Replacement - -The following code snippet shows how you can use `github.com/minio/sha256-simd`. This will automatically select the fastest method for the architecture on which it will be executed. - -```go -import "github.com/minio/sha256-simd" - -func main() { - ... - shaWriter := sha256.New() - io.Copy(shaWriter, file) - ... -} -``` - -## Performance - -Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB. - -| Processor | SIMD | Speed (MB/s) | -| --------------------------------- | ------- | ------------:| -| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 | -| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 | -| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 | -| 3.0 GHz Intel Xeon Platinum 8124M | AVX2 | 449 | -| 3.1 GHz Intel Core i7 | AVX | 362 | -| 3.1 GHz Intel Core i7 | SSE | 299 | - -## asm2plan9s - -In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. - -## Why and benefits - -One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server. - -Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc. 
- -## ARM SHA Extensions - -The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)). - - ``` - sha256h q2, q3, v9.4s - sha256h2 q3, q4, v9.4s - sha256su0 v5.4s, v6.4s - rev32 v8.16b, v8.16b - add v9.4s, v7.4s, v18.4s - mov v4.16b, v2.16b - sha256h q2, q3, v10.4s - sha256h2 q3, q4, v10.4s - sha256su0 v6.4s, v7.4s - sha256su1 v5.4s, v7.4s, v8.4s - ``` - -### Detailed benchmarks - -Benchmarks generated on a 1.2 Ghz Quad-Core ARM Cortex A53 equipped [Pine64](https://www.pine64.com/). - -``` -minio@minio-arm:$ benchcmp golang.txt arm64.txt -benchmark golang arm64 speedup -BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x -BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x -BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x -BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x -``` - -## License - -Released under the Apache License v2.0. You can find the complete text in the file LICENSE. - -## Contributing - -Contributions are welcome, please send PRs for any enhancements. diff --git a/mantle/vendor/github.com/minio/sha256-simd/appveyor.yml b/mantle/vendor/github.com/minio/sha256-simd/appveyor.yml deleted file mode 100644 index a66bfa9f..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -# Platform. 
-platform: x64 - -clone_folder: c:\gopath\src\github.com\minio\sha256-simd - -# environment variables -environment: - GOPATH: c:\gopath - GO15VENDOREXPERIMENT: 1 - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - -# to run your custom scripts instead of automatic MSBuild -build_script: - - go test . - - go test -race . - -# to disable automatic tests -test: off - -# to disable deployment -deploy: off diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid.go b/mantle/vendor/github.com/minio/sha256-simd/cpuid.go deleted file mode 100644 index 878ad463..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid.go +++ /dev/null @@ -1,119 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package sha256 - -// True when SIMD instructions are available. 
-var avx512 bool -var avx2 bool -var avx bool -var sse bool -var sse2 bool -var sse3 bool -var ssse3 bool -var sse41 bool -var sse42 bool -var popcnt bool -var sha bool -var armSha = haveArmSha() - -func init() { - var _xsave bool - var _osxsave bool - var _avx bool - var _avx2 bool - var _avx512f bool - var _avx512dq bool - // var _avx512pf bool - // var _avx512er bool - // var _avx512cd bool - var _avx512bw bool - var _avx512vl bool - var _sseState bool - var _avxState bool - var _opmaskState bool - var _zmmHI256State bool - var _hi16ZmmState bool - - mfi, _, _, _ := cpuid(0) - - if mfi >= 1 { - _, _, c, d := cpuid(1) - - sse = (d & (1 << 25)) != 0 - sse2 = (d & (1 << 26)) != 0 - sse3 = (c & (1 << 0)) != 0 - ssse3 = (c & (1 << 9)) != 0 - sse41 = (c & (1 << 19)) != 0 - sse42 = (c & (1 << 20)) != 0 - popcnt = (c & (1 << 23)) != 0 - _xsave = (c & (1 << 26)) != 0 - _osxsave = (c & (1 << 27)) != 0 - _avx = (c & (1 << 28)) != 0 - } - - if mfi >= 7 { - _, b, _, _ := cpuid(7) - - _avx2 = (b & (1 << 5)) != 0 - _avx512f = (b & (1 << 16)) != 0 - _avx512dq = (b & (1 << 17)) != 0 - // _avx512pf = (b & (1 << 26)) != 0 - // _avx512er = (b & (1 << 27)) != 0 - // _avx512cd = (b & (1 << 28)) != 0 - _avx512bw = (b & (1 << 30)) != 0 - _avx512vl = (b & (1 << 31)) != 0 - sha = (b & (1 << 29)) != 0 - } - - // Stop here if XSAVE unsupported or not enabled - if !_xsave || !_osxsave { - return - } - - if _xsave && _osxsave { - a, _ := xgetbv(0) - - _sseState = (a & (1 << 1)) != 0 - _avxState = (a & (1 << 2)) != 0 - _opmaskState = (a & (1 << 5)) != 0 - _zmmHI256State = (a & (1 << 6)) != 0 - _hi16ZmmState = (a & (1 << 7)) != 0 - } else { - _sseState = true - } - - // Very unlikely that OS would enable XSAVE and then disable SSE - if !_sseState { - sse = false - sse2 = false - sse3 = false - ssse3 = false - sse41 = false - sse42 = false - } - - if _avxState { - avx = _avx - avx2 = _avx2 - } - - if _opmaskState && _zmmHI256State && _hi16ZmmState { - avx512 = (_avx512f && - _avx512dq && - 
_avx512bw && - _avx512vl) - } -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid_386.go b/mantle/vendor/github.com/minio/sha256-simd/cpuid_386.go deleted file mode 100644 index c9890be4..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid_386.go +++ /dev/null @@ -1,24 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package sha256 - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func xgetbv(index uint32) (eax, edx uint32) - -func haveArmSha() bool { - return false -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid_386.s b/mantle/vendor/github.com/minio/sha256-simd/cpuid_386.s deleted file mode 100644 index 1511cd6f..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid_386.s +++ /dev/null @@ -1,53 +0,0 @@ -// The MIT License (MIT) -// -// Copyright (c) 2015 Klaus Post -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall 
be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// +build 386,!gccgo - -// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid(SB), 7, $0 - XORL CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+4(FP) - MOVL BX, ebx+8(FP) - MOVL CX, ecx+12(FP) - MOVL DX, edx+16(FP) - RET - -// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·xgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+4(FP) - MOVL DX, edx+8(FP) - RET diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.go b/mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.go deleted file mode 100644 index c9890be4..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package sha256 - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func xgetbv(index uint32) (eax, edx uint32) - -func haveArmSha() bool { - return false -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.s b/mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.s deleted file mode 100644 index b0f41474..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid_amd64.s +++ /dev/null @@ -1,53 +0,0 @@ -// The MIT License (MIT) -// -// Copyright (c) 2015 Klaus Post -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// +build amd64,!gccgo - -// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuid(SB), 7, $0 - XORQ CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·cpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·xgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/mantle/vendor/github.com/minio/sha256-simd/cpuid_arm.go b/mantle/vendor/github.com/minio/sha256-simd/cpuid_arm.go deleted file mode 100644 index 351dff4b..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/cpuid_arm.go +++ /dev/null @@ -1,32 +0,0 @@ -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package sha256 - -func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 -} - -func xgetbv(index uint32) (eax, edx uint32) { - return 0, 0 -} - -func haveArmSha() bool { - return false -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/go.mod b/mantle/vendor/github.com/minio/sha256-simd/go.mod deleted file mode 100644 index 4451e9eb..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/minio/sha256-simd - -go 1.12 diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256.go b/mantle/vendor/github.com/minio/sha256-simd/sha256.go deleted file mode 100644 index 4e1f6d2f..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256.go +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "crypto/sha256" - "encoding/binary" - "hash" - "runtime" -) - -// Size - The size of a SHA256 checksum in bytes. -const Size = 32 - -// BlockSize - The blocksize of SHA256 in bytes. -const BlockSize = 64 - -const ( - chunk = BlockSize - init0 = 0x6A09E667 - init1 = 0xBB67AE85 - init2 = 0x3C6EF372 - init3 = 0xA54FF53A - init4 = 0x510E527F - init5 = 0x9B05688C - init6 = 0x1F83D9AB - init7 = 0x5BE0CD19 -) - -// digest represents the partial evaluation of a checksum. 
-type digest struct { - h [8]uint32 - x [chunk]byte - nx int - len uint64 -} - -// Reset digest back to default -func (d *digest) Reset() { - d.h[0] = init0 - d.h[1] = init1 - d.h[2] = init2 - d.h[3] = init3 - d.h[4] = init4 - d.h[5] = init5 - d.h[6] = init6 - d.h[7] = init7 - d.nx = 0 - d.len = 0 -} - -type blockfuncType int - -const ( - blockfuncGeneric blockfuncType = iota - blockfuncAvx512 blockfuncType = iota - blockfuncAvx2 blockfuncType = iota - blockfuncAvx blockfuncType = iota - blockfuncSsse blockfuncType = iota - blockfuncSha blockfuncType = iota - blockfuncArm blockfuncType = iota -) - -var blockfunc blockfuncType - -func init() { - is386bit := runtime.GOARCH == "386" - isARM := runtime.GOARCH == "arm" - switch { - case is386bit || isARM: - blockfunc = blockfuncGeneric - case sha && ssse3 && sse41: - blockfunc = blockfuncSha - case avx2: - blockfunc = blockfuncAvx2 - case avx: - blockfunc = blockfuncAvx - case ssse3: - blockfunc = blockfuncSsse - case armSha: - blockfunc = blockfuncArm - default: - blockfunc = blockfuncGeneric - } -} - -// New returns a new hash.Hash computing the SHA256 checksum. -func New() hash.Hash { - if blockfunc != blockfuncGeneric { - d := new(digest) - d.Reset() - return d - } - // Fallback to the standard golang implementation - // if no features were found. 
- return sha256.New() -} - -// Sum256 - single caller sha256 helper -func Sum256(data []byte) (result [Size]byte) { - var d digest - d.Reset() - d.Write(data) - result = d.checkSum() - return -} - -// Return size of checksum -func (d *digest) Size() int { return Size } - -// Return blocksize of checksum -func (d *digest) BlockSize() int { return BlockSize } - -// Write to digest -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - block(d, d.x[:]) - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - block(d, p[:n]) - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -// Return sha256 sum in bytes -func (d *digest) Sum(in []byte) []byte { - // Make a copy of d0 so that caller can keep writing and summing. - d0 := *d - hash := d0.checkSum() - return append(in, hash[:]...) -} - -// Intermediate checksum function -func (d *digest) checkSum() (digest [Size]byte) { - n := d.nx - - var k [64]byte - copy(k[:], d.x[:n]) - - k[n] = 0x80 - - if n >= 56 { - block(d, k[:]) - - // clear block buffer - go compiles this to optimal 1x xorps + 4x movups - // unfortunately expressing this more succinctly results in much worse code - k[0] = 0 - k[1] = 0 - k[2] = 0 - k[3] = 0 - k[4] = 0 - k[5] = 0 - k[6] = 0 - k[7] = 0 - k[8] = 0 - k[9] = 0 - k[10] = 0 - k[11] = 0 - k[12] = 0 - k[13] = 0 - k[14] = 0 - k[15] = 0 - k[16] = 0 - k[17] = 0 - k[18] = 0 - k[19] = 0 - k[20] = 0 - k[21] = 0 - k[22] = 0 - k[23] = 0 - k[24] = 0 - k[25] = 0 - k[26] = 0 - k[27] = 0 - k[28] = 0 - k[29] = 0 - k[30] = 0 - k[31] = 0 - k[32] = 0 - k[33] = 0 - k[34] = 0 - k[35] = 0 - k[36] = 0 - k[37] = 0 - k[38] = 0 - k[39] = 0 - k[40] = 0 - k[41] = 0 - k[42] = 0 - k[43] = 0 - k[44] = 0 - k[45] = 0 - k[46] = 0 - k[47] = 0 - k[48] = 0 - k[49] = 0 - k[50] = 0 - k[51] = 0 - k[52] = 0 - k[53] = 0 - k[54] = 0 - k[55] = 0 - k[56] = 0 - k[57] = 0 - k[58] = 
0 - k[59] = 0 - k[60] = 0 - k[61] = 0 - k[62] = 0 - k[63] = 0 - } - binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3) - block(d, k[:]) - - { - const i = 0 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 1 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 2 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 3 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 4 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 5 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 6 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - { - const i = 7 - binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) - } - - return -} - -func block(dig *digest, p []byte) { - if blockfunc == blockfuncSha { - blockShaGo(dig, p) - } else if blockfunc == blockfuncAvx2 { - blockAvx2Go(dig, p) - } else if blockfunc == blockfuncAvx { - blockAvxGo(dig, p) - } else if blockfunc == blockfuncSsse { - blockSsseGo(dig, p) - } else if blockfunc == blockfuncArm { - blockArmGo(dig, p) - } else if blockfunc == blockfuncGeneric { - blockGeneric(dig, p) - } -} - -func blockGeneric(dig *digest, p []byte) { - var w [64]uint32 - h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] - for len(p) >= chunk { - // Can interlace the computation of w with the - // rounds below if needed for speed. 
- for i := 0; i < 16; i++ { - j := i * 4 - w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) - } - for i := 16; i < 64; i++ { - v1 := w[i-2] - t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) - v2 := w[i-15] - t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) - w[i] = t1 + w[i-7] + t2 + w[i-16] - } - - a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 - - for i := 0; i < 64; i++ { - t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] - - t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) - - h = g - g = f - f = e - e = d + t1 - d = c - c = b - b = a - a = t1 + t2 - } - - h0 += a - h1 += b - h2 += c - h3 += d - h4 += e - h5 += f - h6 += g - h7 += h - - p = p[chunk:] - } - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 -} - -var _K = []uint32{ - 0x428a2f98, - 0x71374491, - 0xb5c0fbcf, - 0xe9b5dba5, - 0x3956c25b, - 0x59f111f1, - 0x923f82a4, - 0xab1c5ed5, - 0xd807aa98, - 0x12835b01, - 0x243185be, - 0x550c7dc3, - 0x72be5d74, - 0x80deb1fe, - 0x9bdc06a7, - 0xc19bf174, - 0xe49b69c1, - 0xefbe4786, - 0x0fc19dc6, - 0x240ca1cc, - 0x2de92c6f, - 0x4a7484aa, - 0x5cb0a9dc, - 0x76f988da, - 0x983e5152, - 0xa831c66d, - 0xb00327c8, - 0xbf597fc7, - 0xc6e00bf3, - 0xd5a79147, - 0x06ca6351, - 0x14292967, - 0x27b70a85, - 0x2e1b2138, - 0x4d2c6dfc, - 0x53380d13, - 0x650a7354, - 0x766a0abb, - 0x81c2c92e, - 0x92722c85, - 0xa2bfe8a1, - 0xa81a664b, - 0xc24b8b70, - 0xc76c51a3, - 0xd192e819, - 0xd6990624, - 0xf40e3585, - 0x106aa070, - 0x19a4c116, - 0x1e376c08, - 0x2748774c, - 0x34b0bcb5, - 0x391c0cb3, - 0x4ed8aa4a, - 0x5b9cca4f, - 0x682e6ff3, - 0x748f82ee, - 0x78a5636f, - 0x84c87814, - 0x8cc70208, - 0x90befffa, - 0xa4506ceb, - 0xbef9a3f7, - 0xc67178f2, -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go 
b/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go deleted file mode 100644 index 52fcaee6..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -//go:noescape -func blockAvx2(h []uint32, message []uint8) diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s b/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s deleted file mode 100644 index 80b0b739..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s +++ /dev/null @@ -1,1449 +0,0 @@ -//+build !noasm,!appengine - -// SHA256 implementation for AVX2 - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// -// This code is based on an Intel White-Paper: -// "Fast SHA-256 Implementations on Intel Architecture Processors" -// -// together with the reference implementation from the following authors: -// James Guilford -// Kirk Yap -// Tim Chen -// -// For Golang it has been converted to Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 -// equivalents -// - -DATA K256<>+0x000(SB)/8, $0x71374491428a2f98 -DATA K256<>+0x008(SB)/8, $0xe9b5dba5b5c0fbcf -DATA K256<>+0x010(SB)/8, $0x71374491428a2f98 -DATA K256<>+0x018(SB)/8, $0xe9b5dba5b5c0fbcf -DATA K256<>+0x020(SB)/8, $0x59f111f13956c25b -DATA K256<>+0x028(SB)/8, $0xab1c5ed5923f82a4 -DATA K256<>+0x030(SB)/8, $0x59f111f13956c25b -DATA K256<>+0x038(SB)/8, $0xab1c5ed5923f82a4 -DATA K256<>+0x040(SB)/8, $0x12835b01d807aa98 -DATA K256<>+0x048(SB)/8, $0x550c7dc3243185be -DATA K256<>+0x050(SB)/8, $0x12835b01d807aa98 -DATA K256<>+0x058(SB)/8, $0x550c7dc3243185be -DATA K256<>+0x060(SB)/8, $0x80deb1fe72be5d74 -DATA K256<>+0x068(SB)/8, $0xc19bf1749bdc06a7 -DATA K256<>+0x070(SB)/8, $0x80deb1fe72be5d74 -DATA K256<>+0x078(SB)/8, $0xc19bf1749bdc06a7 -DATA K256<>+0x080(SB)/8, $0xefbe4786e49b69c1 -DATA K256<>+0x088(SB)/8, $0x240ca1cc0fc19dc6 -DATA K256<>+0x090(SB)/8, $0xefbe4786e49b69c1 -DATA K256<>+0x098(SB)/8, $0x240ca1cc0fc19dc6 -DATA K256<>+0x0a0(SB)/8, $0x4a7484aa2de92c6f -DATA K256<>+0x0a8(SB)/8, $0x76f988da5cb0a9dc -DATA K256<>+0x0b0(SB)/8, $0x4a7484aa2de92c6f -DATA K256<>+0x0b8(SB)/8, $0x76f988da5cb0a9dc -DATA K256<>+0x0c0(SB)/8, $0xa831c66d983e5152 -DATA K256<>+0x0c8(SB)/8, $0xbf597fc7b00327c8 -DATA K256<>+0x0d0(SB)/8, $0xa831c66d983e5152 -DATA K256<>+0x0d8(SB)/8, $0xbf597fc7b00327c8 -DATA K256<>+0x0e0(SB)/8, $0xd5a79147c6e00bf3 -DATA K256<>+0x0e8(SB)/8, $0x1429296706ca6351 -DATA K256<>+0x0f0(SB)/8, $0xd5a79147c6e00bf3 -DATA K256<>+0x0f8(SB)/8, $0x1429296706ca6351 -DATA K256<>+0x100(SB)/8, $0x2e1b213827b70a85 -DATA K256<>+0x108(SB)/8, $0x53380d134d2c6dfc -DATA 
K256<>+0x110(SB)/8, $0x2e1b213827b70a85 -DATA K256<>+0x118(SB)/8, $0x53380d134d2c6dfc -DATA K256<>+0x120(SB)/8, $0x766a0abb650a7354 -DATA K256<>+0x128(SB)/8, $0x92722c8581c2c92e -DATA K256<>+0x130(SB)/8, $0x766a0abb650a7354 -DATA K256<>+0x138(SB)/8, $0x92722c8581c2c92e -DATA K256<>+0x140(SB)/8, $0xa81a664ba2bfe8a1 -DATA K256<>+0x148(SB)/8, $0xc76c51a3c24b8b70 -DATA K256<>+0x150(SB)/8, $0xa81a664ba2bfe8a1 -DATA K256<>+0x158(SB)/8, $0xc76c51a3c24b8b70 -DATA K256<>+0x160(SB)/8, $0xd6990624d192e819 -DATA K256<>+0x168(SB)/8, $0x106aa070f40e3585 -DATA K256<>+0x170(SB)/8, $0xd6990624d192e819 -DATA K256<>+0x178(SB)/8, $0x106aa070f40e3585 -DATA K256<>+0x180(SB)/8, $0x1e376c0819a4c116 -DATA K256<>+0x188(SB)/8, $0x34b0bcb52748774c -DATA K256<>+0x190(SB)/8, $0x1e376c0819a4c116 -DATA K256<>+0x198(SB)/8, $0x34b0bcb52748774c -DATA K256<>+0x1a0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA K256<>+0x1a8(SB)/8, $0x682e6ff35b9cca4f -DATA K256<>+0x1b0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA K256<>+0x1b8(SB)/8, $0x682e6ff35b9cca4f -DATA K256<>+0x1c0(SB)/8, $0x78a5636f748f82ee -DATA K256<>+0x1c8(SB)/8, $0x8cc7020884c87814 -DATA K256<>+0x1d0(SB)/8, $0x78a5636f748f82ee -DATA K256<>+0x1d8(SB)/8, $0x8cc7020884c87814 -DATA K256<>+0x1e0(SB)/8, $0xa4506ceb90befffa -DATA K256<>+0x1e8(SB)/8, $0xc67178f2bef9a3f7 -DATA K256<>+0x1f0(SB)/8, $0xa4506ceb90befffa -DATA K256<>+0x1f8(SB)/8, $0xc67178f2bef9a3f7 - -DATA K256<>+0x200(SB)/8, $0x0405060700010203 -DATA K256<>+0x208(SB)/8, $0x0c0d0e0f08090a0b -DATA K256<>+0x210(SB)/8, $0x0405060700010203 -DATA K256<>+0x218(SB)/8, $0x0c0d0e0f08090a0b -DATA K256<>+0x220(SB)/8, $0x0b0a090803020100 -DATA K256<>+0x228(SB)/8, $0xffffffffffffffff -DATA K256<>+0x230(SB)/8, $0x0b0a090803020100 -DATA K256<>+0x238(SB)/8, $0xffffffffffffffff -DATA K256<>+0x240(SB)/8, $0xffffffffffffffff -DATA K256<>+0x248(SB)/8, $0x0b0a090803020100 -DATA K256<>+0x250(SB)/8, $0xffffffffffffffff -DATA K256<>+0x258(SB)/8, $0x0b0a090803020100 - -GLOBL K256<>(SB), 8, $608 - -// We need 0x220 stack space aligned on 
a 512 boundary, so for the -// worstcase-aligned SP we need twice this amount, being 1088 (=0x440) -// -// SP aligned end-aligned stacksize -// 100013d0 10001400 10001620 592 -// 100013d8 10001400 10001620 584 -// 100013e0 10001600 10001820 1088 -// 100013e8 10001600 10001820 1080 - -// func blockAvx2(h []uint32, message []uint8) -TEXT ·blockAvx2(SB),$1088-48 - - MOVQ h+0(FP), DI // DI: &h - MOVQ message_base+24(FP), SI // SI: &message - MOVQ message_len+32(FP), DX // len(message) - ADDQ SI, DX // end pointer of input - MOVQ SP, R11 // copy stack pointer - ADDQ $0x220, SP // sp += 0x220 - ANDQ $0xfffffffffffffe00, SP // align stack frame - ADDQ $0x1c0, SP - MOVQ DI, 0x40(SP) // save ctx - MOVQ SI, 0x48(SP) // save input - MOVQ DX, 0x50(SP) // save end pointer - MOVQ R11, 0x58(SP) // save copy of stack pointer - - WORD $0xf8c5; BYTE $0x77 // vzeroupper - ADDQ $0x40, SI // input++ - MOVL (DI), AX - MOVQ SI, R12 // borrow $T1 - MOVL 4(DI), BX - CMPQ SI, DX // $_end - MOVL 8(DI), CX - LONG $0xe4440f4c // cmove r12,rsp /* next block or random data */ - MOVL 12(DI), DX - MOVL 16(DI), R8 - MOVL 20(DI), R9 - MOVL 24(DI), R10 - MOVL 28(DI), R11 - - LEAQ K256<>(SB), BP - LONG $0x856f7dc5; LONG $0x00000220 // VMOVDQA YMM8, 0x220[rbp] /* vmovdqa ymm8,YMMWORD PTR [rip+0x220] */ - LONG $0x8d6f7dc5; LONG $0x00000240 // VMOVDQA YMM9, 0x240[rbp] /* vmovdqa ymm9,YMMWORD PTR [rip+0x240] */ - LONG $0x956f7dc5; LONG $0x00000200 // VMOVDQA YMM10, 0x200[rbp] /* vmovdqa ymm7,YMMWORD PTR [rip+0x200] */ - -loop0: - LONG $0x6f7dc1c4; BYTE $0xfa // VMOVDQA YMM7, YMM10 - - // Load first 16 dwords from two blocks - MOVOU -64(SI), X0 // vmovdqu xmm0,XMMWORD PTR [rsi-0x40] - MOVOU -48(SI), X1 // vmovdqu xmm1,XMMWORD PTR [rsi-0x30] - MOVOU -32(SI), X2 // vmovdqu xmm2,XMMWORD PTR [rsi-0x20] - MOVOU -16(SI), X3 // vmovdqu xmm3,XMMWORD PTR [rsi-0x10] - - // Byte swap data and transpose data into high/low - LONG $0x387dc3c4; WORD $0x2404; BYTE $0x01 // vinserti128 ymm0,ymm0,[r12],0x1 - LONG 
$0x3875c3c4; LONG $0x0110244c // vinserti128 ymm1,ymm1,0x10[r12],0x1 - LONG $0x007de2c4; BYTE $0xc7 // vpshufb ymm0,ymm0,ymm7 - LONG $0x386dc3c4; LONG $0x01202454 // vinserti128 ymm2,ymm2,0x20[r12],0x1 - LONG $0x0075e2c4; BYTE $0xcf // vpshufb ymm1,ymm1,ymm7 - LONG $0x3865c3c4; LONG $0x0130245c // vinserti128 ymm3,ymm3,0x30[r12],0x1 - - LEAQ K256<>(SB), BP - LONG $0x006de2c4; BYTE $0xd7 // vpshufb ymm2,ymm2,ymm7 - LONG $0x65fefdc5; BYTE $0x00 // vpaddd ymm4,ymm0,[rbp] - LONG $0x0065e2c4; BYTE $0xdf // vpshufb ymm3,ymm3,ymm7 - LONG $0x6dfef5c5; BYTE $0x20 // vpaddd ymm5,ymm1,0x20[rbp] - LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,0x40[rbp] - LONG $0x7dfee5c5; BYTE $0x60 // vpaddd ymm7,ymm3,0x60[rbp] - - LONG $0x247ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm4 - XORQ R14, R14 - LONG $0x6c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm5 - - ADDQ $-0x40, SP - MOVQ BX, DI - LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 - XORQ CX, DI // magic - LONG $0x7c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm7 - MOVQ R9, R12 - ADDQ $0x80, BP - -loop1: - // Schedule 48 input dwords, by doing 3 rounds of 12 each - // Note: SIMD instructions are interleaved with the SHA calculations - ADDQ $-0x40, SP - LONG $0x0f75e3c4; WORD $0x04e0 // vpalignr ymm4,ymm1,ymm0,0x4 - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) - LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0x0f65e3c4; WORD $0x04fa // vpalignr ymm7,ymm3,ymm2,0x4 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0xc7fefdc5 // vpaddd ymm0,ymm0,ymm7 - LONG $0x231c8d47 // lea 
r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - LONG $0xfb70fdc5; BYTE $0xfa // vpshufd ymm7,ymm3,0xfa - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) - LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG 
$0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - LONG $0xc4fefdc5 // vpaddd ymm0,ymm0,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) - LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf870fdc5; BYTE $0x50 // vpshufd ymm7,ymm0,0x50 - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 
// lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) - LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0x75fefdc5; BYTE $0x00 // vpaddd ymm6,ymm0,[rbp+0x0] - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 - LONG $0x0f6de3c4; WORD $0x04e1 // vpalignr ymm4,ymm2,ymm1,0x4 - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) - LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0x0f7de3c4; WORD $0x04fb // vpalignr ymm7,ymm0,ymm3,0x4 - LONG 
$0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0xcffef5c5 // vpaddd ymm1,ymm1,ymm7 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - LONG $0xf870fdc5; BYTE $0xfa // vpshufd ymm7,ymm0,0xfa - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) - LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0xe6efddc5 // vpxor 
ymm4,ymm4,ymm6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - LONG $0xccfef5c5 // vpaddd ymm1,ymm1,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) - LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf970fdc5; BYTE $0x50 // vpshufd ymm7,ymm1,0x50 - LONG 
$0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) - LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0x75fef5c5; BYTE $0x20 // vpaddd ymm6,ymm1,[rbp+0x20] - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - 
LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 - - LONG $0x24648d48; BYTE $0xc0 // lea rsp,[rsp-0x40] - LONG $0x0f65e3c4; WORD $0x04e2 // vpalignr ymm4,ymm3,ymm2,0x4 - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) - LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0x0f75e3c4; WORD $0x04f8 // vpalignr ymm7,ymm1,ymm0,0x4 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0xd7feedc5 // vpaddd ymm2,ymm2,ymm7 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - LONG $0xf970fdc5; BYTE $0xfa // vpshufd ymm7,ymm1,0xfa - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) - LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG 
$0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - LONG $0xd4feedc5 // vpaddd ymm2,ymm2,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) - LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx 
r14d,ecx,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xfa70fdc5; BYTE $0x50 // vpshufd ymm7,ymm2,0x50 - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) - LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG 
$0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,[rbp+0x40] - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 - LONG $0x0f7de3c4; WORD $0x04e3 // vpalignr ymm4,ymm0,ymm3,0x4 - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) - LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0x0f6de3c4; WORD $0x04f9 // vpalignr ymm7,ymm2,ymm1,0x4 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0xdffee5c5 // vpaddd ymm3,ymm3,ymm7 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - LONG $0xfa70fdc5; 
BYTE $0xfa // vpshufd ymm7,ymm2,0xfa - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) - LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - LONG $0xdcfee5c5 // vpaddd ymm3,ymm3,ymm4 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) - LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] - WORD $0x2145; BYTE $0xd4 // and 
r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xfb70fdc5; BYTE $0x50 // vpshufd ymm7,ymm3,0x50 - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) - LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE 
$0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0x75fee5c5; BYTE $0x60 // vpaddd ymm6,ymm3,[rbp+0x60] - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 - ADDQ $0x80, BP - - CMPB 0x3(BP), $0x0 - JNE loop1 - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x40) - LONG $0x245c0344; BYTE $0x40 // add r11d,[rsp+0x40] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD 
$0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x44) - LONG $0x24540344; BYTE $0x44 // add r10d,[rsp+0x44] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x48) - LONG $0x244c0344; BYTE $0x48 // add r9d,[rsp+0x48] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - 
WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x4c) - LONG $0x24440344; BYTE $0x4c // add r8d,[rsp+0x4c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x60) - LONG $0x60245403 // add edx,[rsp+0x60] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx 
r13d,eax,0x19 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x64) - LONG $0x64244c03 // add ecx,[rsp+0x64] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD 
$0x3145; BYTE $0xc7 // xor r15d,r8d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x68) - LONG $0x68245c03 // add ebx,[rsp+0x68] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x6c) - LONG $0x6c244403 // add eax,[rsp+0x6c] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xf07b63c4; WORD 
$0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x00) - LONG $0x241c0344 // add r11d,[rsp] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x04) - LONG $0x24540344; BYTE $0x04 // add r10d,[rsp+0x4] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea 
r11d,[r11+r14*1] - LONG $0x22148d47 // lea r10d,[r10+r12*1] - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x08) - LONG $0x244c0344; BYTE $0x08 // add r9d,[rsp+0x8] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - 
LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; BYTE $0xcc // mov r12d,ecx - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x0c) - LONG $0x24440344; BYTE $0x0c // add r8d,[rsp+0xc] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x20) - LONG $0x20245403 // add edx,[rsp+0x20] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea 
edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x24) - LONG $0x24244c03 // add ecx,[rsp+0x24] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x28) - LONG $0x28245c03 // add ebx,[rsp+0x28] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea 
ebx,[rbx+r12*1] - LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x2c) - LONG $0x2c244403 // add eax,[rsp+0x2c] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d 
- - MOVQ 0x200(SP), DI // $_ctx - ADDQ R14, AX - - LEAQ 0x1c0(SP), BP - - ADDL (DI), AX - ADDL 4(DI), BX - ADDL 8(DI), CX - ADDL 12(DI), DX - ADDL 16(DI), R8 - ADDL 20(DI), R9 - ADDL 24(DI), R10 - ADDL 28(DI), R11 - - MOVL AX, (DI) - MOVL BX, 4(DI) - MOVL CX, 8(DI) - MOVL DX, 12(DI) - MOVL R8, 16(DI) - MOVL R9, 20(DI) - MOVL R10, 24(DI) - MOVL R11, 28(DI) - - CMPQ SI, 0x50(BP) // $_end - JE done - - XORQ R14, R14 - MOVQ BX, DI - XORQ CX, DI // magic - MOVQ R9, R12 - -loop2: - // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, BP, 0x10) - LONG $0x105d0344 // add r11d,[rbp+0x10] - WORD $0x2145; BYTE $0xc4 // and r12d,r8d - LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 - LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb - LONG $0x30048d42 // lea eax,[rax+r14*1] - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 - LONG $0x231c8d47 // lea r11d,[r11+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xc7 // mov r15d,eax - LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 - LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] - WORD $0x3141; BYTE $0xdf // xor r15d,ebx - LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd - LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 - LONG $0x1a148d42 // lea edx,[rdx+r11*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xdf31 // xor edi,ebx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] - WORD $0x8945; BYTE $0xc4 // mov r12d,r8d - - // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, BP, 0x14) - LONG $0x14550344 // add r10d,[rbp+0x14] - WORD $0x2141; BYTE $0xd4 // and r12d,edx - LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 - LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb - LONG $0x331c8d47 // lea r11d,[r11+r14*1] - LONG $0x22148d47 // lea 
r10d,[r10+r12*1] - LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 - LONG $0x22148d47 // lea r10d,[r10+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xdf // mov edi,r11d - LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 - LONG $0x2a148d47 // lea r10d,[r10+r13*1] - WORD $0xc731 // xor edi,eax - LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd - LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 - LONG $0x110c8d42 // lea ecx,[rcx+r10*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xc7 // xor r15d,eax - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x3a148d47 // lea r10d,[r10+r15*1] - WORD $0x8941; BYTE $0xd4 // mov r12d,edx - - // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, BP, 0x18) - LONG $0x184d0344 // add r9d,[rbp+0x18] - WORD $0x2141; BYTE $0xcc // and r12d,ecx - LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 - LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb - LONG $0x32148d47 // lea r10d,[r10+r14*1] - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 - LONG $0x210c8d47 // lea r9d,[r9+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xd7 // mov r15d,r10d - LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 - LONG $0x290c8d47 // lea r9d,[r9+r13*1] - WORD $0x3145; BYTE $0xdf // xor r15d,r11d - LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd - LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 - LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xdf // xor edi,r11d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d45 // lea r9d,[r9+rdi*1] - WORD $0x8941; 
BYTE $0xcc // mov r12d,ecx - - // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, BP, 0x1c) - LONG $0x1c450344 // add r8d,[rbp+0x1c] - WORD $0x2141; BYTE $0xdc // and r12d,ebx - LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 - LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb - LONG $0x310c8d47 // lea r9d,[r9+r14*1] - LONG $0x20048d47 // lea r8d,[r8+r12*1] - LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 - LONG $0x20048d47 // lea r8d,[r8+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8944; BYTE $0xcf // mov edi,r9d - LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 - LONG $0x28048d47 // lea r8d,[r8+r13*1] - WORD $0x3144; BYTE $0xd7 // xor edi,r10d - LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd - LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 - LONG $0x00048d42 // lea eax,[rax+r8*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xd7 // xor r15d,r10d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d47 // lea r8d,[r8+r15*1] - WORD $0x8941; BYTE $0xdc // mov r12d,ebx - - // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, BP, 0x30) - WORD $0x5503; BYTE $0x30 // add edx,[rbp+0x30] - WORD $0x2141; BYTE $0xc4 // and r12d,eax - LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 - LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb - LONG $0x30048d47 // lea r8d,[r8+r14*1] - LONG $0x22148d42 // lea edx,[rdx+r12*1] - LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 - LONG $0x22148d42 // lea edx,[rdx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8945; BYTE $0xc7 // mov r15d,r8d - LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 - LONG $0x2a148d42 // lea edx,[rdx+r13*1] - WORD $0x3145; BYTE $0xcf // xor r15d,r9d - LONG 
$0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd - LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 - LONG $0x131c8d45 // lea r11d,[r11+rdx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3144; BYTE $0xcf // xor edi,r9d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] - WORD $0x8941; BYTE $0xc4 // mov r12d,eax - - // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, BP, 0x34) - WORD $0x4d03; BYTE $0x34 // add ecx,[rbp+0x34] - WORD $0x2145; BYTE $0xdc // and r12d,r11d - LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 - LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb - LONG $0x32148d42 // lea edx,[rdx+r14*1] - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 - LONG $0x210c8d42 // lea ecx,[rcx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xd789 // mov edi,edx - LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 - LONG $0x290c8d42 // lea ecx,[rcx+r13*1] - WORD $0x3144; BYTE $0xc7 // xor edi,r8d - LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd - LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 - LONG $0x0a148d45 // lea r10d,[r10+rcx*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3145; BYTE $0xc7 // xor r15d,r8d - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x390c8d42 // lea ecx,[rcx+r15*1] - WORD $0x8945; BYTE $0xdc // mov r12d,r11d - - // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, BP, 0x38) - WORD $0x5d03; BYTE $0x38 // add ebx,[rbp+0x38] - WORD $0x2145; BYTE $0xd4 // and r12d,r10d - LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 - LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb - LONG $0x310c8d42 // lea ecx,[rcx+r14*1] - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - LONG $0xf22862c4; BYTE $0xe0 // 
andn r12d,r10d,eax - WORD $0x3145; BYTE $0xfd // xor r13d,r15d - LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 - LONG $0x231c8d42 // lea ebx,[rbx+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0x8941; BYTE $0xcf // mov r15d,ecx - LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 - LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] - WORD $0x3141; BYTE $0xd7 // xor r15d,edx - LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd - LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 - LONG $0x190c8d45 // lea r9d,[r9+rbx*1] - WORD $0x2144; BYTE $0xff // and edi,r15d - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0xd731 // xor edi,edx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] - WORD $0x8945; BYTE $0xd4 // mov r12d,r10d - - // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, BP, 0x3c) - WORD $0x4503; BYTE $0x3c // add eax,[rbp+0x3c] - WORD $0x2145; BYTE $0xcc // and r12d,r9d - LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 - LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb - LONG $0x331c8d42 // lea ebx,[rbx+r14*1] - LONG $0x20048d42 // lea eax,[rax+r12*1] - LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d - WORD $0x3141; BYTE $0xfd // xor r13d,edi - LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 - LONG $0x20048d42 // lea eax,[rax+r12*1] - WORD $0x3145; BYTE $0xf5 // xor r13d,r14d - WORD $0xdf89 // mov edi,ebx - LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 - LONG $0x28048d42 // lea eax,[rax+r13*1] - WORD $0xcf31 // xor edi,ecx - LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd - LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 - LONG $0x00048d45 // lea r8d,[r8+rax*1] - WORD $0x2141; BYTE $0xff // and r15d,edi - WORD $0x3145; BYTE $0xe6 // xor r14d,r12d - WORD $0x3141; BYTE $0xcf // xor r15d,ecx - WORD $0x3145; BYTE $0xee // xor r14d,r13d - LONG $0x38048d42 // lea eax,[rax+r15*1] - WORD $0x8945; BYTE $0xcc // mov r12d,r9d - - ADDQ $-0x40, BP - CMPQ BP, SP - JAE 
loop2 - - MOVQ 0x200(SP), DI // $_ctx - ADDQ R14, AX - - ADDQ $0x1c0, SP - - ADDL (DI), AX - ADDL 4(DI), BX - ADDL 8(DI), CX - ADDL 12(DI), DX - ADDL 16(DI), R8 - ADDL 20(DI), R9 - - ADDQ $0x80, SI // input += 2 - ADDL 24(DI), R10 - MOVQ SI, R12 - ADDL 28(DI), R11 - CMPQ SI, 0x50(SP) // input == _end - - MOVL AX, (DI) - LONG $0xe4440f4c // cmove r12,rsp /* next block or stale data */ - MOVL AX, (DI) - MOVL BX, 4(DI) - MOVL CX, 8(DI) - MOVL DX, 12(DI) - MOVL R8, 16(DI) - MOVL R9, 20(DI) - MOVL R10, 24(DI) - MOVL R11, 28(DI) - - JBE loop0 - LEAQ (SP), BP - -done: - MOVQ BP, SP - MOVQ 0x58(SP), SP // restore saved stack pointer - WORD $0xf8c5; BYTE $0x77 // vzeroupper - - RET - diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm deleted file mode 100644 index c959b1aa..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm +++ /dev/null @@ -1,686 +0,0 @@ - -// 16x Parallel implementation of SHA256 for AVX512 - -// -// Minio Cloud Storage, (C) 2017 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// -// This code is based on the Intel Multi-Buffer Crypto for IPSec library -// and more specifically the following implementation: -// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm -// -// For Golang it has been converted into Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble the AVX512 instructions -// - -// Copyright (c) 2017, Intel Corporation -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of Intel Corporation nor the names of its contributors -// may be used to endorse or promote products derived from this software -// without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -#define SHA256_DIGEST_ROW_SIZE 64 - -// arg1 -#define STATE rdi -#define STATE_P9 DI -// arg2 -#define INP_SIZE rsi -#define INP_SIZE_P9 SI - -#define IDX rcx -#define TBL rdx -#define TBL_P9 DX - -#define INPUT rax -#define INPUT_P9 AX - -#define inp0 r9 -#define SCRATCH_P9 R12 -#define SCRATCH r12 -#define maskp r13 -#define MASKP_P9 R13 -#define mask r14 -#define MASK_P9 R14 - -#define A zmm0 -#define B zmm1 -#define C zmm2 -#define D zmm3 -#define E zmm4 -#define F zmm5 -#define G zmm6 -#define H zmm7 -#define T1 zmm8 -#define TMP0 zmm9 -#define TMP1 zmm10 -#define TMP2 zmm11 -#define TMP3 zmm12 -#define TMP4 zmm13 -#define TMP5 zmm14 -#define TMP6 zmm15 - -#define W0 zmm16 -#define W1 zmm17 -#define W2 zmm18 -#define W3 zmm19 -#define W4 zmm20 -#define W5 zmm21 -#define W6 zmm22 -#define W7 zmm23 -#define W8 zmm24 -#define W9 zmm25 -#define W10 zmm26 -#define W11 zmm27 -#define W12 zmm28 -#define W13 zmm29 -#define W14 zmm30 -#define W15 zmm31 - - -#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \ - \ - \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0} - \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0} - \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0} - \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0} - \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0} - \ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0} - \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0} - \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0} - \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0} - \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0} - \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0} - \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0} - \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0} - \ // r13 = 
{n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0} - \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0} - \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0} - \ - \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} - \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} - \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} - \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} - \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} - \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} - \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} - \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} - \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} - \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} - \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} - \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} - \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} - \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} - \ - \ // process top half - vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0} - vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2} - vshufps _t1, _r2, _r3, 0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0} - vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2} - \ - vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1} - vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2} - vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 
a7 d3 c3 b3 a3} - vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0} - \ - \ // use r2 in place of t0 - vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0} - vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2} - vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0} - vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2} - \ - vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1} - vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2} - vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3} - vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0} - \ - \ // use r6 in place of t0 - vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0} - vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2} - vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0} - vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2} - \ - vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 113 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1} - vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 114 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2} - vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 115 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3} - vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 112 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0} - \ - \ // use r10 in place of t0 - vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 a1 m0} - vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 a3 m2} - vshufps _t1, _r14, _r15, 0x44 \ 
// t1 = {p13 p12 013 012 p9 p8 09 08 p5 p4 05 04 p1 p0 01 00} - vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 015 014 p11 p10 011 010 p7 p6 07 06 p3 p2 03 02} - \ - vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 013 n13 m13 p9 09 n9 m9 p5 05 n5 m5 p1 01 n1 m1} - vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 014 n14 m14 p10 010 n10 m10 p6 06 n6 m6 p2 02 n2 m2} - vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 015 n15 m15 p11 011 n11 m11 p7 07 n7 m7 p3 03 n3 m3} - vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 012 n12 m12 p8 08 n8 m8 p4 04 n4 m4 p0 00 n0 m0} - \ - \ // At this point, the registers that contain interesting data are: - \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12 - \ // Can use t1 and r14 as scratch registers - LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \ - LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \ - \ - vmovdqu32 _r14, [rbx] \ - vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0} - vmovdqu32 _t1, [r8] \ - vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4} - \ - vmovdqu32 _r2, [rbx] \ - vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1} - vmovdqu32 _t0, [r8] \ - vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5} - \ - vmovdqu32 _r3, [rbx] \ - vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2} - vmovdqu32 _r7, [r8] \ - vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6} - \ - vmovdqu32 _r1, [rbx] \ - vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3} - vmovdqu32 _r5, [r8] \ - vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7} - \ - vmovdqu32 _r0, [rbx] \ - vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0} - vmovdqu32 _r4, [r8] \ - vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 
j12 i12 p4 o4 n4 m4 l4 k4 j4 i4} - \ - vmovdqu32 _r6, [rbx] \ - vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1} - vmovdqu32 _r10, [r8] \ - vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5} - \ - vmovdqu32 _r11, [rbx] \ - vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2} - vmovdqu32 _r15, [r8] \ - vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6} - \ - vmovdqu32 _r9, [rbx] \ - vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3} - vmovdqu32 _r13, [r8] \ - vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7} - \ - \ // At this point r8 and r12 can be used as scratch registers - vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} - vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} - \ - vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} - vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} - \ - vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} - vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - \ - vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} - vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} - \ - vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} - vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} - \ - vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} - vshuff64x2 _t0, _r3, 
_r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - \ - vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} - vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} - \ - vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} - vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} - \ - vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} - vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} - - -// CH(A, B, C) = (A&B) ^ (~A&C) -// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G) -// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22 -// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25 -// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3 -// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10 - -// Main processing loop per round -#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \ - \ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt - \ // T2 = SIGMA0(A) + MAJ(A, B, C) - \ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2 - \ - \ // H becomes T2, then add T1 for A - \ // D becomes D + T1 for E - \ - vpaddd T1, _H, TMP3 \ // T1 = H + Kt - vmovdqu32 TMP0, _E \ - vprord TMP1, _E, 6 \ // ROR_6(E) - vprord TMP2, _E, 11 \ // ROR_11(E) - vprord TMP3, _E, 25 \ // ROR_25(E) - vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G) - vpaddd T1, T1, _WT \ // T1 = T1 + Wt - vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E) - vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G) - vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E) - vpaddd _D, _D, T1 \ // D = D + T1 - \ - vprord _H, _A, 2 \ // ROR_2(A) - vprord TMP2, _A, 13 \ // ROR_13(A) - vprord TMP3, _A, 22 \ // ROR_22(A) - vmovdqu32 TMP0, _A \ - vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C) - vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A) - vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C) - vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1 - \ - vmovdqu32 
TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt - - -#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \ - vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2) - vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2) - vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2) - vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2) - \ - vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) - vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7 - \ - vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15) - vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15) - vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15) - vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15) - \ - vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + - \ // Wt-7 + sigma0(Wt-15) + - - -// Note this is reading in a block of data for one lane -// When all 16 are read, the data must be transposed to build msg schedule -#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \ - TESTQ $(1<(SB), TBL_P9 - vmovdqu32 TMP2, [TBL] - - // Get first K from table - MOVQ table+16(FP), TBL_P9 - vmovdqu32 TMP3, [TBL] - - // Save digests for later addition - vmovdqu32 [SCRATCH + 64*0], A - vmovdqu32 [SCRATCH + 64*1], B - vmovdqu32 [SCRATCH + 64*2], C - vmovdqu32 [SCRATCH + 64*3], D - vmovdqu32 [SCRATCH + 64*4], E - vmovdqu32 [SCRATCH + 64*5], F - vmovdqu32 [SCRATCH + 64*6], G - vmovdqu32 [SCRATCH + 64*7], H - - add IDX, 64 - - // Transpose input data - TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1) - - vpshufb W0, W0, TMP2 - vpshufb W1, W1, TMP2 - vpshufb W2, W2, TMP2 - vpshufb W3, W3, TMP2 - vpshufb W4, W4, TMP2 - vpshufb W5, W5, TMP2 - vpshufb W6, W6, TMP2 - vpshufb W7, W7, TMP2 - vpshufb W8, W8, TMP2 - vpshufb W9, W9, TMP2 - vpshufb W10, W10, TMP2 - vpshufb W11, W11, TMP2 - vpshufb W12, W12, TMP2 - vpshufb W13, W13, TMP2 - vpshufb W14, W14, TMP2 - vpshufb W15, W15, TMP2 - - // MSG Schedule for W0-W15 is now complete in registers - // Process first 48 rounds - // Calculate next Wt+16 after processing is complete and Wt is 
unneeded - - PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 2, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) - PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( 
W6, 22, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) - PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) - PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) - PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) - PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) - PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) - PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) - PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) - PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) - PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) - PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) - PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) - PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) - PROCESS_LOOP(W12, 44, E, F, G, H, 
A, B, C, D) - MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) - PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) - PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) - PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) - - // Check if this is the last block - sub INP_SIZE, 1 - JE lastLoop - - // Load next mask for inputs - ADDQ $8, MASKP_P9 - MOVQ (MASKP_P9), MASK_P9 - - // Process last 16 rounds - // Read in next block msg data for use in first 16 words of msg sched - - PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_00_15( W0, 0, skipNext0) - PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_00_15( W1, 1, skipNext1) - PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_00_15( W2, 2, skipNext2) - PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_00_15( W3, 3, skipNext3) - PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_00_15( W4, 4, skipNext4) - PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_00_15( W5, 5, skipNext5) - PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_00_15( W6, 6, skipNext6) - PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) - MSG_SCHED_ROUND_00_15( W7, 7, skipNext7) - PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) - MSG_SCHED_ROUND_00_15( W8, 8, skipNext8) - PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) - MSG_SCHED_ROUND_00_15( W9, 9, skipNext9) - PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) - MSG_SCHED_ROUND_00_15(W10, 10, skipNext10) - PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) - MSG_SCHED_ROUND_00_15(W11, 11, skipNext11) - PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) - MSG_SCHED_ROUND_00_15(W12, 12, skipNext12) - PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) - MSG_SCHED_ROUND_00_15(W13, 13, skipNext13) - PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) - MSG_SCHED_ROUND_00_15(W14, 14, skipNext14) - PROCESS_LOOP(W15, 63, B, C, D, E, F, G, 
H, A) - MSG_SCHED_ROUND_00_15(W15, 15, skipNext15) - - // Add old digest - vmovdqu32 TMP2, A - vmovdqu32 A, [SCRATCH + 64*0] - vpaddd A{k1}, A, TMP2 - vmovdqu32 TMP2, B - vmovdqu32 B, [SCRATCH + 64*1] - vpaddd B{k1}, B, TMP2 - vmovdqu32 TMP2, C - vmovdqu32 C, [SCRATCH + 64*2] - vpaddd C{k1}, C, TMP2 - vmovdqu32 TMP2, D - vmovdqu32 D, [SCRATCH + 64*3] - vpaddd D{k1}, D, TMP2 - vmovdqu32 TMP2, E - vmovdqu32 E, [SCRATCH + 64*4] - vpaddd E{k1}, E, TMP2 - vmovdqu32 TMP2, F - vmovdqu32 F, [SCRATCH + 64*5] - vpaddd F{k1}, F, TMP2 - vmovdqu32 TMP2, G - vmovdqu32 G, [SCRATCH + 64*6] - vpaddd G{k1}, G, TMP2 - vmovdqu32 TMP2, H - vmovdqu32 H, [SCRATCH + 64*7] - vpaddd H{k1}, H, TMP2 - - kmovq k1, mask - JMP lloop - -lastLoop: - // Process last 16 rounds - PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) - PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) - PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) - PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) - PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) - PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) - PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) - PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) - PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) - PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) - PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) - PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) - PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) - PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) - PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) - PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) - - // Add old digest - vmovdqu32 TMP2, A - vmovdqu32 A, [SCRATCH + 64*0] - vpaddd A{k1}, A, TMP2 - vmovdqu32 TMP2, B - vmovdqu32 B, [SCRATCH + 64*1] - vpaddd B{k1}, B, TMP2 - vmovdqu32 TMP2, C - vmovdqu32 C, [SCRATCH + 64*2] - vpaddd C{k1}, C, TMP2 - vmovdqu32 TMP2, D - vmovdqu32 D, [SCRATCH + 64*3] - vpaddd D{k1}, D, TMP2 - vmovdqu32 TMP2, E - vmovdqu32 E, [SCRATCH + 64*4] - vpaddd E{k1}, E, TMP2 - vmovdqu32 TMP2, F - vmovdqu32 F, [SCRATCH + 64*5] - vpaddd F{k1}, F, TMP2 
- vmovdqu32 TMP2, G - vmovdqu32 G, [SCRATCH + 64*6] - vpaddd G{k1}, G, TMP2 - vmovdqu32 TMP2, H - vmovdqu32 H, [SCRATCH + 64*7] - vpaddd H{k1}, H, TMP2 - - // Write out digest - vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A - vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B - vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C - vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D - vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E - vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F - vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G - vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H - - VZEROUPPER - RET - -// -// Tables -// - -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b -GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 - -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D -GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 - -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A -DATA 
PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F -GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go deleted file mode 100644 index db8e48d3..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go +++ /dev/null @@ -1,500 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "encoding/binary" - "errors" - "hash" - "sort" - "sync/atomic" - "time" -) - -//go:noescape -func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte) - -// Avx512ServerUID - Do not start at 0 but next multiple of 16 so as to be able to -// differentiate with default initialiation value of 0 -const Avx512ServerUID = 16 - -var uidCounter uint64 - -// NewAvx512 - initialize sha256 Avx512 implementation. 
-func NewAvx512(a512srv *Avx512Server) hash.Hash { - uid := atomic.AddUint64(&uidCounter, 1) - return &Avx512Digest{uid: uid, a512srv: a512srv} -} - -// Avx512Digest - Type for computing SHA256 using Avx512 -type Avx512Digest struct { - uid uint64 - a512srv *Avx512Server - x [chunk]byte - nx int - len uint64 - final bool - result [Size]byte -} - -// Size - Return size of checksum -func (d *Avx512Digest) Size() int { return Size } - -// BlockSize - Return blocksize of checksum -func (d Avx512Digest) BlockSize() int { return BlockSize } - -// Reset - reset sha digest to its initial values -func (d *Avx512Digest) Reset() { - d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true} - d.nx = 0 - d.len = 0 - d.final = false -} - -// Write to digest -func (d *Avx512Digest) Write(p []byte) (nn int, err error) { - - if d.final { - return 0, errors.New("Avx512Digest already finalized. Reset first before writing again") - } - - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := copy(d.x[d.nx:], p) - d.nx += n - if d.nx == chunk { - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]} - d.nx = 0 - } - p = p[n:] - } - if len(p) >= chunk { - n := len(p) &^ (chunk - 1) - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]} - p = p[n:] - } - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -// Sum - Return sha256 sum in bytes -func (d *Avx512Digest) Sum(in []byte) (result []byte) { - - if d.final { - return append(in, d.result[:]...) - } - - trail := make([]byte, 0, 128) - trail = append(trail, d.x[:d.nx]...) - - len := d.len - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - var tmp [64]byte - tmp[0] = 0x80 - if len%64 < 56 { - trail = append(trail, tmp[0:56-len%64]...) - } else { - trail = append(trail, tmp[0:64+56-len%64]...) - } - d.nx = 0 - - // Length in bits. - len <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(len >> (56 - 8*i)) - } - trail = append(trail, tmp[0:8]...) 
- - sumCh := make(chan [Size]byte) - d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh} - d.result = <-sumCh - d.final = true - return append(in, d.result[:]...) -} - -var table = [512]uint64{ - 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, - 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, - 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, - 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, - 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, - 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, - 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, - 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, - 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, - 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, - 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, - 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, - 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, - 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, - 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, - 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, - 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, - 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, - 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, - 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, - 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, - 0x243185be243185be, 0x243185be243185be, 
0x243185be243185be, 0x243185be243185be, - 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, - 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, - 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, - 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, - 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, - 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, - 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, - 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, - 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, - 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, - 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, - 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, - 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, - 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, - 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, - 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, - 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, - 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, - 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, - 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, - 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, - 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, - 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, - 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 
0x5cb0a9dc5cb0a9dc, - 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, - 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, - 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, - 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, - 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, - 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, - 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, - 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, - 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, - 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, - 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, - 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, - 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, - 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, - 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, - 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, - 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, - 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, - 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, - 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, - 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, - 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, - 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, - 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, - 
0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, - 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, - 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, - 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, - 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, - 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, - 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, - 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, - 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, - 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, - 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, - 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, - 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, - 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, - 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, - 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, - 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, - 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, - 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, - 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, - 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, - 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, - 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, - 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, - 0x106aa070106aa070, 
0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, - 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, - 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, - 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, - 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, - 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, - 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, - 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, - 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, - 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, - 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, - 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, - 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, - 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, - 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, - 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, - 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, - 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, - 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, - 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, - 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, - 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, - 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, - 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, - 0x8cc702088cc70208, 0x8cc702088cc70208, 
0x8cc702088cc70208, 0x8cc702088cc70208, - 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, - 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, - 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, - 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, - 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, - 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, - 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, - 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, - 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2} - -// Interface function to assembly ode -func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte { - - scratch := [512]byte{} - sha256X16Avx512(digests, &scratch, &table, mask, input) - - output := [16][Size]byte{} - for i := 0; i < 16; i++ { - output[i] = getDigest(i, digests[:]) - } - - return output -} - -func getDigest(index int, state []byte) (sum [Size]byte) { - for j := 0; j < 16; j += 2 { - for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size { - binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4])) - } - } - return -} - -// Message to send across input channel -type blockInput struct { - uid uint64 - msg []byte - reset bool - final bool - sumCh chan [Size]byte -} - -// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations -type Avx512Server struct { - blocksCh chan blockInput // Input channel - totalIn int // Total number of inputs waiting to be processed - lanes [16]Avx512LaneInfo // Array with info per lane (out of 16) - digests map[uint64][Size]byte // Map of uids to (interim) digest results -} - -// Avx512LaneInfo - Info for each lane -type Avx512LaneInfo struct { - uid uint64 // unique identification 
for this SHA processing - block []byte // input block to be processed - outputCh chan [Size]byte // channel for output result -} - -// NewAvx512Server - Create new object for parallel processing handling -func NewAvx512Server() *Avx512Server { - a512srv := &Avx512Server{} - a512srv.digests = make(map[uint64][Size]byte) - a512srv.blocksCh = make(chan blockInput) - - // Start a single thread for reading from the input channel - go a512srv.Process() - return a512srv -} - -// Process - Sole handler for reading from the input channel -func (a512srv *Avx512Server) Process() { - for { - select { - case block := <-a512srv.blocksCh: - if block.reset { - a512srv.reset(block.uid) - continue - } - index := block.uid & 0xf - // fmt.Println("Adding message:", block.uid, index) - - if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs - //fmt.Println("Invoking Blocks()") - a512srv.blocks() - } - a512srv.totalIn++ - a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg} - if block.final { - a512srv.lanes[index].outputCh = block.sumCh - } - if a512srv.totalIn == len(a512srv.lanes) { - // fmt.Println("Invoking Blocks() while FULL: ") - a512srv.blocks() - } - - // TODO: test with larger timeout - case <-time.After(1 * time.Microsecond): - for _, lane := range a512srv.lanes { - if lane.block != nil { // check if there is any input to process - // fmt.Println("Invoking Blocks() on TIMEOUT: ") - a512srv.blocks() - break // we are done - } - } - } - } -} - -// Do a reset for this calculation -func (a512srv *Avx512Server) reset(uid uint64) { - - // Check if there is a message still waiting to be processed (and remove if so) - for i, lane := range a512srv.lanes { - if lane.uid == uid { - if lane.block != nil { - a512srv.lanes[i] = Avx512LaneInfo{} // clear message - a512srv.totalIn-- - } - } - } - - // Delete entry from hash map - delete(a512srv.digests, uid) -} - -// Invoke assembly and send results back -func (a512srv *Avx512Server) 
blocks() { - - inputs := [16][]byte{} - for i := range inputs { - inputs[i] = a512srv.lanes[i].block - } - - mask := expandMask(genMask(inputs)) - outputs := blockAvx512(a512srv.getDigests(), inputs, mask) - - a512srv.totalIn = 0 - for i := 0; i < len(outputs); i++ { - uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh - a512srv.digests[uid] = outputs[i] - a512srv.lanes[i] = Avx512LaneInfo{} - - if outputCh != nil { - // Send back result - outputCh <- outputs[i] - delete(a512srv.digests, uid) // Delete entry from hashmap - } - } -} - -func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) { - a512srv.blocksCh <- blockInput{uid: uid, msg: p} - return len(p), nil -} - -// Sum - return sha256 sum in bytes for a given sum id. -func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte { - sumCh := make(chan [32]byte) - a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh} - return <-sumCh -} - -func (a512srv *Avx512Server) getDigests() *[512]byte { - digests := [512]byte{} - for i, lane := range a512srv.lanes { - a, ok := a512srv.digests[lane.uid] - if ok { - binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4])) - binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8])) - binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12])) - binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16])) - binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20])) - binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24])) - binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28])) - binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32])) - } else { - binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) - binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) - 
binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) - binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) - binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) - binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) - binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) - binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) - } - } - return &digests -} - -// Helper struct for sorting blocks based on length -type lane struct { - len uint - pos uint -} - -type lanes []lane - -func (lns lanes) Len() int { return len(lns) } -func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] } -func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len } - -// Helper struct for -type maskRounds struct { - mask uint64 - rounds uint64 -} - -func genMask(input [16][]byte) [16]maskRounds { - - // Sort on blocks length small to large - var sorted [16]lane - for c, inpt := range input { - sorted[c] = lane{uint(len(inpt)), uint(c)} - } - sort.Sort(lanes(sorted[:])) - - // Create mask array including 'rounds' between masks - m, round, index := uint64(0xffff), uint64(0), 0 - var mr [16]maskRounds - for _, s := range sorted { - if s.len > 0 { - if uint64(s.len)>>6 > round { - mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round} - index++ - } - round = uint64(s.len) >> 6 - } - m = m & ^(1 << uint(s.pos)) - } - - return mr -} - -// TODO: remove function -func expandMask(mr [16]maskRounds) []uint64 { - size := uint64(0) - for _, r := range mr { - size += r.rounds - } - result, index := make([]uint64, size), 0 - for _, r := range mr { - for j := uint64(0); j < r.rounds; j++ { - result[index] = r.mask - index++ - } - } - return result -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s deleted file mode 100644 index 275bcacb..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s +++ /dev/null @@ -1,267 
+0,0 @@ -//+build !noasm,!appengine - -TEXT ·sha256X16Avx512(SB), 7, $0 - MOVQ digests+0(FP), DI - MOVQ scratch+8(FP), R12 - MOVQ mask_len+32(FP), SI - MOVQ mask_base+24(FP), R13 - MOVQ (R13), R14 - LONG $0x92fbc1c4; BYTE $0xce - LEAQ inputs+48(FP), AX - QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07 - MOVQ table+16(FP), DX - WORD $0x3148; BYTE $0xc9 - TESTQ $(1<<0), R14 - JE skipInput0 - MOVQ 0*24(AX), R9 - LONG $0x487cc162; WORD $0x0410; BYTE $0x09 - -skipInput0: - TESTQ $(1<<1), R14 - JE skipInput1 - MOVQ 1*24(AX), R9 - LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 - -skipInput1: - TESTQ $(1<<2), R14 - JE skipInput2 - MOVQ 2*24(AX), R9 - LONG $0x487cc162; WORD $0x1410; BYTE $0x09 - -skipInput2: - TESTQ $(1<<3), R14 - JE skipInput3 - MOVQ 3*24(AX), R9 - LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 - -skipInput3: - TESTQ $(1<<4), R14 - JE skipInput4 - MOVQ 4*24(AX), R9 - LONG $0x487cc162; WORD $0x2410; BYTE $0x09 - -skipInput4: - TESTQ $(1<<5), R14 - JE skipInput5 - MOVQ 5*24(AX), R9 - LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 - -skipInput5: - TESTQ $(1<<6), R14 - JE skipInput6 - MOVQ 6*24(AX), R9 - LONG $0x487cc162; WORD $0x3410; BYTE $0x09 - -skipInput6: - TESTQ $(1<<7), R14 - JE skipInput7 - MOVQ 7*24(AX), R9 - LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 - -skipInput7: - TESTQ $(1<<8), R14 - JE skipInput8 - MOVQ 8*24(AX), R9 - LONG $0x487c4162; WORD $0x0410; BYTE $0x09 - -skipInput8: - TESTQ $(1<<9), R14 - JE skipInput9 - MOVQ 9*24(AX), R9 - LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 - -skipInput9: - TESTQ $(1<<10), R14 - JE skipInput10 - MOVQ 10*24(AX), R9 - LONG $0x487c4162; WORD $0x1410; BYTE $0x09 - -skipInput10: - TESTQ $(1<<11), R14 - JE skipInput11 - MOVQ 11*24(AX), R9 - LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 - -skipInput11: - TESTQ $(1<<12), R14 - JE skipInput12 - MOVQ 12*24(AX), R9 - LONG $0x487c4162; 
WORD $0x2410; BYTE $0x09 - -skipInput12: - TESTQ $(1<<13), R14 - JE skipInput13 - MOVQ 13*24(AX), R9 - LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 - -skipInput13: - TESTQ $(1<<14), R14 - JE skipInput14 - MOVQ 14*24(AX), R9 - LONG $0x487c4162; WORD $0x3410; BYTE $0x09 - -skipInput14: - TESTQ $(1<<15), R14 - JE skipInput15 - MOVQ 15*24(AX), R9 - LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 - -skipInput15: -lloop: - LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX - LONG $0x487e7162; WORD $0x1a6f - MOVQ table+16(FP), DX - QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88 - LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX - LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 - QUAD $0x2262336f487e6162; QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD 
$0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; 
QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD 
$0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; 
QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD 
$0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; 
QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD 
$0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; 
QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD 
$0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; 
QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD 
$0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; 
QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD 
$0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; 
QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD 
$0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; 
QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD 
$0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005 - JE lastLoop - ADDQ $8, R13 - MOVQ (R13), R14 - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31 - TESTQ $(1<<0), R14 - JE skipNext0 - MOVQ 0*24(AX), R9 - LONG $0x487cc162; WORD $0x0410; BYTE $0x09 - -skipNext0: - QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD 
$0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32 - TESTQ $(1<<1), R14 - JE skipNext1 - MOVQ 1*24(AX), R9 - LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 - -skipNext1: - QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33 - TESTQ $(1<<2), R14 - JE skipNext2 - MOVQ 2*24(AX), R9 - LONG $0x487cc162; WORD $0x1410; BYTE $0x09 - -skipNext2: - QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34 - TESTQ $(1<<3), R14 - JE skipNext3 - MOVQ 3*24(AX), R9 - LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 - -skipNext3: - QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35 - TESTQ $(1<<4), R14 - JE skipNext4 - MOVQ 4*24(AX), R9 - LONG $0x487cc162; WORD $0x2410; BYTE $0x09 - -skipNext4: - QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD 
$0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36 - TESTQ $(1<<5), R14 - JE skipNext5 - MOVQ 5*24(AX), R9 - LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 - -skipNext5: - QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37 - TESTQ $(1<<6), R14 - JE skipNext6 - MOVQ 6*24(AX), R9 - LONG $0x487cc162; WORD $0x3410; BYTE $0x09 - -skipNext6: - QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38 - TESTQ $(1<<7), R14 - JE skipNext7 - MOVQ 7*24(AX), R9 - LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 - -skipNext7: - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD 
$0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39 - TESTQ $(1<<8), R14 - JE skipNext8 - MOVQ 8*24(AX), R9 - LONG $0x487c4162; WORD $0x0410; BYTE $0x09 - -skipNext8: - QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a - TESTQ $(1<<9), R14 - JE skipNext9 - MOVQ 9*24(AX), R9 - LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 - -skipNext9: - QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b - TESTQ $(1<<10), R14 - JE skipNext10 - MOVQ 10*24(AX), R9 - LONG $0x487c4162; WORD $0x1410; BYTE $0x09 - -skipNext10: - QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c - TESTQ $(1<<11), R14 - JE skipNext11 - MOVQ 11*24(AX), R9 - LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 - -skipNext11: - QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD 
$0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d - TESTQ $(1<<12), R14 - JE skipNext12 - MOVQ 12*24(AX), R9 - LONG $0x487c4162; WORD $0x2410; BYTE $0x09 - -skipNext12: - QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e - TESTQ $(1<<13), R14 - JE skipNext13 - MOVQ 13*24(AX), R9 - LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 - -skipNext13: - QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f - TESTQ $(1<<14), R14 - JE skipNext14 - MOVQ 14*24(AX), R9 - LONG $0x487c4162; WORD $0x3410; BYTE $0x09 - -skipNext14: - QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD 
$0x626f; BYTE $0x40 - TESTQ $(1<<15), R14 - JE skipNext15 - MOVQ 15*24(AX), R9 - LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 - -skipNext15: - QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1 - JMP lloop - -lastLoop: - QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD 
$0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; 
QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD 
$0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; 
QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f - VZEROUPPER - RET - -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 -DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b -GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C -DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D -GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E -DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F -GLOBL 
PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go b/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go deleted file mode 100644 index c2f71181..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -//go:noescape -func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s b/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s deleted file mode 100644 index 9f444d49..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s +++ /dev/null @@ -1,408 +0,0 @@ -//+build !noasm,!appengine - -// SHA256 implementation for AVX - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -// -// This code is based on an Intel White-Paper: -// "Fast SHA-256 Implementations on Intel Architecture Processors" -// -// together with the reference implementation from the following authors: -// James Guilford -// Kirk Yap -// Tim Chen -// -// For Golang it has been converted to Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 -// equivalents -// - -#include "textflag.h" - -#define ROTATE_XS \ - MOVOU X4, X15 \ - MOVOU X5, X4 \ - MOVOU X6, X5 \ - MOVOU X7, X6 \ - MOVOU X15, X7 - -// compute s0 four at a time and s1 two at a time -// compute W[-16] + W[-7] 4 at a time -#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - LONG $0x0f41e3c4; WORD $0x04c6 \ // VPALIGNR XMM0,XMM7,XMM6,0x4 /* XTMP0 = W[-7] */ - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL e, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL f, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - XORL g, R15 \ // y2 = f^g - LONG $0xc4fef9c5 \ // VPADDD XMM0,XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) - ANDL e, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - \ - \ // compute s0 - \ - LONG $0x0f51e3c4; WORD $0x04cc \ // VPALIGNR XMM1,XMM5,XMM4,0x4 /* XTMP1 = W[-15] */ - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - 
XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - \ // ROTATE_ARGS - MOVL a, R15 \ // y2 = a - LONG $0xd172e9c5; BYTE $0x07 \ // VPSRLD XMM2,XMM1,0x7 /* */ - ORL c, R13 \ // y0 = a|c - ADDL h, d \ // d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - LONG $0xf172e1c5; BYTE $0x19 \ // VPSLLD XMM3,XMM1,0x19 /* */ - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - LONG $0xdaebe1c5 \ // VPOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL d, R13 \ // y0 = e - MOVL h, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> (25-11) - XORL d, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL e, R15 \ // y2 = f - ROLL $23, R14 \ // y1 = a >> (22-13) - LONG $0xd172e9c5; BYTE $0x12 \ // VPSRLD XMM2,XMM1,0x12 /* */ - XORL h, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL f, R15 \ // y2 = f^g - LONG $0xd172b9c5; BYTE $0x03 \ // VPSRLD XMM8,XMM1,0x3 /* XTMP4 = W[-15] >> 3 */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL d, R15 \ // y2 = (f^g)&e - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xf172f1c5; BYTE $0x0e \ // VPSLLD XMM1,XMM1,0xe /* */ - XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL f, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd9efe1c5 \ // VPXOR XMM3,XMM3,XMM1 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - LONG $0xdaefe1c5 \ // VPXOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ - MOVL h, R13 \ // y0 = a - ADDL R15, g \ // h = h + S1 + CH + k + w - MOVL h, 
R15 \ // y2 = a - LONG $0xef61c1c4; BYTE $0xc8 \ // VPXOR XMM1,XMM3,XMM8 /* XTMP1 = s0 */ - ORL b, R13 \ // y0 = a|c - ADDL g, c \ // d = d + h + S1 + CH + k + w - ANDL b, R15 \ // y2 = a&c - \ - \ // compute low s1 - \ - LONG $0xd770f9c5; BYTE $0xfa \ // VPSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ - ANDL a, R13 \ // y0 = (a|c)&b - ADDL R14, g \ // h = h + S1 + CH + k + w + S0 - LONG $0xc1fef9c5 \ // VPADDD XMM0,XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL c, R13 \ // y0 = e - MOVL g, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> (25-11) - XORL c, R13 \ // y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL d, R15 \ // y2 = f - XORL g, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - LONG $0xd272b9c5; BYTE $0x0a \ // VPSRLD XMM8,XMM2,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ - XORL e, R15 \ // y2 = f^g - LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ - XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL c, R15 \ // y2 = (f^g)&e - LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL e, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xc2ef39c5 \ // VPXOR XMM8,XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ - MOVL g, R13 \ // y0 = a - ADDL R15, f \ // h = h + S1 + CH + k + w - MOVL g, R15 \ // y2 = a - LONG $0x003942c4; BYTE $0xc2 \ // VPSHUFB XMM8,XMM8,XMM10 /* XTMP4 = s1 {00BA} */ - ORL a, R13 \ // y0 = a|c - ADDL f, b \ // d = d + h + S1 + CH 
+ k + w - ANDL a, R15 \ // y2 = a&c - LONG $0xfe79c1c4; BYTE $0xc0 \ // VPADDD XMM0,XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ - ANDL h, R13 \ // y0 = (a|c)&b - ADDL R14, f \ // h = h + S1 + CH + k + w + S0 - \ - \ // compute high s1 - \ - LONG $0xd070f9c5; BYTE $0x50 \ // VPSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] {DDCC} */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL b, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL f, R14 \ // y1 = a - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL b, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL c, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - LONG $0xd272a1c5; BYTE $0x0a \ // VPSRLD XMM11,XMM2,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ - XORL f, R14 \ // y1 = a ^ (a >> (22-13) - XORL d, R15 \ // y2 = f^g - LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ - XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL b, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ - XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL d, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xdaef21c5 \ // VPXOR XMM11,XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ - MOVL f, R13 \ // y0 = a - ADDL R15, e \ // h = h + S1 + CH + k + w - MOVL f, R15 \ // y2 = a - LONG $0x002142c4; BYTE $0xdc \ // VPSHUFB XMM11,XMM11,XMM12 /* XTMP5 = s1 {DC00} */ - ORL h, R13 \ // y0 = a|c - ADDL e, a \ // d = d + h + S1 + CH + k + w - ANDL h, R15 \ // y2 = a&c - LONG $0xe0fea1c5 \ // VPADDD XMM4,XMM11,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ - ANDL g, R13 
\ // y0 = (a|c)&b - ADDL R14, e \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - ROTATE_XS - -#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - XORL e, R13 \ // y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL f, R15 \ // y2 = f - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL g, R15 \ // y2 = f^g - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - ANDL e, R15 \ // y2 = (f^g)&e - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - MOVL a, R15 \ // y2 = a - ORL c, R13 \ // y0 = a|c - ADDL h, d \ // d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ - -// func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) -TEXT ·blockAvx(SB), 7, $0-80 - - MOVQ h+0(FP), SI // SI: &h - MOVQ message_base+24(FP), R8 // &message - MOVQ message_len+32(FP), R9 // length of message - CMPQ R9, $0 - JEQ done_hash - ADDQ R8, R9 - MOVQ R9, reserved2+64(FP) // store end of message - - // Register definition - // a --> eax - // b --> ebx - // c --> ecx - // d --> r8d - // e --> edx - // f --> r9d - // g --> r10d - // h --> r11d - // - // y0 --> r13d - // y1 --> r14d - // y2 --> r15d - - MOVL (0*4)(SI), AX // a = H0 - MOVL 
(1*4)(SI), BX // b = H1 - MOVL (2*4)(SI), CX // c = H2 - MOVL (3*4)(SI), R8 // d = H3 - MOVL (4*4)(SI), DX // e = H4 - MOVL (5*4)(SI), R9 // f = H5 - MOVL (6*4)(SI), R10 // g = H6 - MOVL (7*4)(SI), R11 // h = H7 - - MOVOU bflipMask<>(SB), X13 - MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA - MOVOU shufDC00<>(SB), X12 // shuffle xDxC -> DC00 - - MOVQ message_base+24(FP), SI // SI: &message - -loop0: - LEAQ constants<>(SB), BP - - // byte swap first 16 dwords - MOVOU 0*16(SI), X4 - LONG $0x0059c2c4; BYTE $0xe5 // VPSHUFB XMM4, XMM4, XMM13 - MOVOU 1*16(SI), X5 - LONG $0x0051c2c4; BYTE $0xed // VPSHUFB XMM5, XMM5, XMM13 - MOVOU 2*16(SI), X6 - LONG $0x0049c2c4; BYTE $0xf5 // VPSHUFB XMM6, XMM6, XMM13 - MOVOU 3*16(SI), X7 - LONG $0x0041c2c4; BYTE $0xfd // VPSHUFB XMM7, XMM7, XMM13 - - MOVQ SI, reserved3+72(FP) - MOVD $0x3, DI - - // schedule 48 input dwords, by doing 3 rounds of 16 each -loop1: - LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - LONG $0x4dfe59c5; BYTE $0x10 // VPADDD XMM9, XMM4, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - LONG $0x4dfe59c5; BYTE $0x20 // VPADDD XMM9, XMM4, 32[RBP] /* Add 3rd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - LONG $0x4dfe59c5; BYTE $0x30 // VPADDD XMM9, XMM4, 48[RBP] /* Add 4th constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $64, BP - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - SUBQ $1, DI - JNE loop1 - - MOVD $0x2, DI - -loop2: - LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ - MOVOU X9, reserved0+48(FP) - DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) - DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) - DO_ROUND(R10, R11, AX, BX, CX, R8, 
DX, R9, 56) - DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) - - LONG $0x4dfe51c5; BYTE $0x10 // VPADDD XMM9, XMM5, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $32, BP - DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) - DO_ROUND( R8, DX, R9, R10, R11, AX, BX, CX, 52) - DO_ROUND( CX, R8, DX, R9, R10, R11, AX, BX, 56) - DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) - - MOVOU X6, X4 - MOVOU X7, X5 - - SUBQ $1, DI - JNE loop2 - - MOVQ h+0(FP), SI // SI: &h - ADDL (0*4)(SI), AX // H0 = a + H0 - MOVL AX, (0*4)(SI) - ADDL (1*4)(SI), BX // H1 = b + H1 - MOVL BX, (1*4)(SI) - ADDL (2*4)(SI), CX // H2 = c + H2 - MOVL CX, (2*4)(SI) - ADDL (3*4)(SI), R8 // H3 = d + H3 - MOVL R8, (3*4)(SI) - ADDL (4*4)(SI), DX // H4 = e + H4 - MOVL DX, (4*4)(SI) - ADDL (5*4)(SI), R9 // H5 = f + H5 - MOVL R9, (5*4)(SI) - ADDL (6*4)(SI), R10 // H6 = g + H6 - MOVL R10, (6*4)(SI) - ADDL (7*4)(SI), R11 // H7 = h + H7 - MOVL R11, (7*4)(SI) - - MOVQ reserved3+72(FP), SI - ADDQ $64, SI - CMPQ reserved2+64(FP), SI - JNE loop0 - -done_hash: - RET - -// Constants table -DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 -DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf -DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b -DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 -DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 -DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be -DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 -DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 -DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 -DATA constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 -DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f -DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc -DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 -DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 -DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 -DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 -DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 -DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc -DATA 
constants<>+0x90(SB)/8, $0x766a0abb650a7354 -DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e -DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 -DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 -DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 -DATA constants<>+0xb8(SB)/8, $0x106aa070f40e3585 -DATA constants<>+0xc0(SB)/8, $0x1e376c0819a4c116 -DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c -DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f -DATA constants<>+0xe0(SB)/8, $0x78a5636f748f82ee -DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 -DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa -DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 - -DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 -DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b - -DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 -DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 - -GLOBL constants<>(SB), 8, $256 -GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 -GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 -GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go b/mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go deleted file mode 100644 index 483689ef..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go +++ /dev/null @@ -1,6 +0,0 @@ -//+build !noasm,!appengine - -package sha256 - -//go:noescape -func blockSha(h *[8]uint32, message []uint8) diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s b/mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s deleted file mode 100644 index 909fc0ef..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s +++ /dev/null @@ -1,266 +0,0 @@ -//+build !noasm,!appengine - -// SHA intrinsic version of SHA256 - -// Kristofer Peterson, (C) 2018. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -#include "textflag.h" - -DATA K<>+0x00(SB)/4, $0x428a2f98 -DATA K<>+0x04(SB)/4, $0x71374491 -DATA K<>+0x08(SB)/4, $0xb5c0fbcf -DATA K<>+0x0c(SB)/4, $0xe9b5dba5 -DATA K<>+0x10(SB)/4, $0x3956c25b -DATA K<>+0x14(SB)/4, $0x59f111f1 -DATA K<>+0x18(SB)/4, $0x923f82a4 -DATA K<>+0x1c(SB)/4, $0xab1c5ed5 -DATA K<>+0x20(SB)/4, $0xd807aa98 -DATA K<>+0x24(SB)/4, $0x12835b01 -DATA K<>+0x28(SB)/4, $0x243185be -DATA K<>+0x2c(SB)/4, $0x550c7dc3 -DATA K<>+0x30(SB)/4, $0x72be5d74 -DATA K<>+0x34(SB)/4, $0x80deb1fe -DATA K<>+0x38(SB)/4, $0x9bdc06a7 -DATA K<>+0x3c(SB)/4, $0xc19bf174 -DATA K<>+0x40(SB)/4, $0xe49b69c1 -DATA K<>+0x44(SB)/4, $0xefbe4786 -DATA K<>+0x48(SB)/4, $0x0fc19dc6 -DATA K<>+0x4c(SB)/4, $0x240ca1cc -DATA K<>+0x50(SB)/4, $0x2de92c6f -DATA K<>+0x54(SB)/4, $0x4a7484aa -DATA K<>+0x58(SB)/4, $0x5cb0a9dc -DATA K<>+0x5c(SB)/4, $0x76f988da -DATA K<>+0x60(SB)/4, $0x983e5152 -DATA K<>+0x64(SB)/4, $0xa831c66d -DATA K<>+0x68(SB)/4, $0xb00327c8 -DATA K<>+0x6c(SB)/4, $0xbf597fc7 -DATA K<>+0x70(SB)/4, $0xc6e00bf3 -DATA K<>+0x74(SB)/4, $0xd5a79147 -DATA K<>+0x78(SB)/4, $0x06ca6351 -DATA K<>+0x7c(SB)/4, $0x14292967 -DATA K<>+0x80(SB)/4, $0x27b70a85 -DATA K<>+0x84(SB)/4, $0x2e1b2138 -DATA K<>+0x88(SB)/4, $0x4d2c6dfc -DATA K<>+0x8c(SB)/4, $0x53380d13 -DATA K<>+0x90(SB)/4, $0x650a7354 -DATA K<>+0x94(SB)/4, $0x766a0abb -DATA K<>+0x98(SB)/4, $0x81c2c92e -DATA K<>+0x9c(SB)/4, $0x92722c85 -DATA K<>+0xa0(SB)/4, $0xa2bfe8a1 -DATA 
K<>+0xa4(SB)/4, $0xa81a664b -DATA K<>+0xa8(SB)/4, $0xc24b8b70 -DATA K<>+0xac(SB)/4, $0xc76c51a3 -DATA K<>+0xb0(SB)/4, $0xd192e819 -DATA K<>+0xb4(SB)/4, $0xd6990624 -DATA K<>+0xb8(SB)/4, $0xf40e3585 -DATA K<>+0xbc(SB)/4, $0x106aa070 -DATA K<>+0xc0(SB)/4, $0x19a4c116 -DATA K<>+0xc4(SB)/4, $0x1e376c08 -DATA K<>+0xc8(SB)/4, $0x2748774c -DATA K<>+0xcc(SB)/4, $0x34b0bcb5 -DATA K<>+0xd0(SB)/4, $0x391c0cb3 -DATA K<>+0xd4(SB)/4, $0x4ed8aa4a -DATA K<>+0xd8(SB)/4, $0x5b9cca4f -DATA K<>+0xdc(SB)/4, $0x682e6ff3 -DATA K<>+0xe0(SB)/4, $0x748f82ee -DATA K<>+0xe4(SB)/4, $0x78a5636f -DATA K<>+0xe8(SB)/4, $0x84c87814 -DATA K<>+0xec(SB)/4, $0x8cc70208 -DATA K<>+0xf0(SB)/4, $0x90befffa -DATA K<>+0xf4(SB)/4, $0xa4506ceb -DATA K<>+0xf8(SB)/4, $0xbef9a3f7 -DATA K<>+0xfc(SB)/4, $0xc67178f2 -GLOBL K<>(SB), RODATA|NOPTR, $256 - -DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203 -DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b -GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16 - -// Register Usage -// BX base address of constant table (constant) -// DX hash_state (constant) -// SI hash_data.data -// DI hash_data.data + hash_data.length - 64 (constant) -// X0 scratch -// X1 scratch -// X2 working hash state // ABEF -// X3 working hash state // CDGH -// X4 first 16 bytes of block -// X5 second 16 bytes of block -// X6 third 16 bytes of block -// X7 fourth 16 bytes of block -// X12 saved hash state // ABEF -// X13 saved hash state // CDGH -// X15 data shuffle mask (constant) - -TEXT ·blockSha(SB), NOSPLIT, $0-32 - MOVQ h+0(FP), DX - MOVQ message_base+8(FP), SI - MOVQ message_len+16(FP), DI - LEAQ -64(SI)(DI*1), DI - MOVOU (DX), X2 - MOVOU 16(DX), X1 - MOVO X2, X3 - PUNPCKLLQ X1, X2 - PUNPCKHLQ X1, X3 - PSHUFD $0x27, X2, X2 - PSHUFD $0x27, X3, X3 - MOVO SHUF_MASK<>(SB), X15 - LEAQ K<>(SB), BX - - JMP TEST - -LOOP: - MOVO X2, X12 - MOVO X3, X13 - - // load block and shuffle - MOVOU (SI), X4 - MOVOU 16(SI), X5 - MOVOU 32(SI), X6 - MOVOU 48(SI), X7 - PSHUFB X15, X4 - PSHUFB X15, X5 - PSHUFB X15, X6 - PSHUFB 
X15, X7 - -#define ROUND456 \ - PADDL X5, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X5, X1 \ - LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4 - PADDL X1, X6 \ - LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 - -#define ROUND567 \ - PADDL X6, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X6, X1 \ - LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4 - PADDL X1, X7 \ - LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 - -#define ROUND674 \ - PADDL X7, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X7, X1 \ - LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4 - PADDL X1, X4 \ - LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7 - -#define ROUND745 \ - PADDL X4, X0 \ - LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 - MOVO X4, X1 \ - LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4 - PADDL X1, X5 \ - LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4 - PSHUFD $0x4e, X0, X0 \ - LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 - LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4 - - // rounds 0-3 - MOVO (BX), X0 - PADDL X4, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 4-7 - MOVO 1*16(BX), X0 - PADDL X5, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 - - // rounds 8-11 - MOVO 2*16(BX), X0 - PADDL X6, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 - - MOVO 3*16(BX), X0; ROUND674 // rounds 
12-15 - MOVO 4*16(BX), X0; ROUND745 // rounds 16-19 - MOVO 5*16(BX), X0; ROUND456 // rounds 20-23 - MOVO 6*16(BX), X0; ROUND567 // rounds 24-27 - MOVO 7*16(BX), X0; ROUND674 // rounds 28-31 - MOVO 8*16(BX), X0; ROUND745 // rounds 32-35 - MOVO 9*16(BX), X0; ROUND456 // rounds 36-39 - MOVO 10*16(BX), X0; ROUND567 // rounds 40-43 - MOVO 11*16(BX), X0; ROUND674 // rounds 44-47 - MOVO 12*16(BX), X0; ROUND745 // rounds 48-51 - - // rounds 52-55 - MOVO 13*16(BX), X0 - PADDL X5, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - MOVO X5, X1 - LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4 - PADDL X1, X6 - LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 56-59 - MOVO 14*16(BX), X0 - PADDL X6, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - MOVO X6, X1 - LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4 - PADDL X1, X7 - LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - // rounds 60-63 - MOVO 15*16(BX), X0 - PADDL X7, X0 - LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 - PSHUFD $0x4e, X0, X0 - LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 - - PADDL X12, X2 - PADDL X13, X3 - - ADDQ $64, SI - -TEST: - CMPQ SI, DI - JBE LOOP - - PSHUFD $0x4e, X3, X0 - LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0 - PSHUFD $0x4e, X2, X1 - LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f - PSHUFD $0x1b, X0, X0 - PSHUFD $0x1b, X1, X1 - - MOVOU X0, (DX) - MOVOU X1, 16(DX) - - RET diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go b/mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go deleted file mode 100644 index 1ae2320b..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -//go:noescape -func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s b/mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s deleted file mode 100644 index 7afb45c8..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s +++ /dev/null @@ -1,429 +0,0 @@ -//+build !noasm,!appengine - -// SHA256 implementation for SSSE3 - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// -// This code is based on an Intel White-Paper: -// "Fast SHA-256 Implementations on Intel Architecture Processors" -// -// together with the reference implementation from the following authors: -// James Guilford -// Kirk Yap -// Tim Chen -// -// For Golang it has been converted to Plan 9 assembly with the help of -// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 -// equivalents -// - -#include "textflag.h" - -#define ROTATE_XS \ - MOVOU X4, X15 \ - MOVOU X5, X4 \ - MOVOU X6, X5 \ - MOVOU X7, X6 \ - MOVOU X15, X7 - -// compute s0 four at a time and s1 two at a time -// compute W[-16] + W[-7] 4 at a time -#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - MOVOU X7, X0 \ - LONG $0x0f3a0f66; WORD $0x04c6 \ // PALIGNR XMM0,XMM6,0x4 /* XTMP0 = W[-7] */ - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL e, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL f, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - XORL g, R15 \ // y2 = f^g - LONG $0xc4fe0f66 \ // PADDD XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) - ANDL e, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - \ - \ // compute s0 - \ - MOVOU X5, X1 \ - LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1,XMM4,0x4 /* XTMP1 = W[-15] */ - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - \ // ROTATE_ARGS - MOVL a, R15 \ // y2 = a - MOVOU X1, X2 \ - LONG $0xd2720f66; BYTE $0x07 \ // PSRLD XMM2,0x7 /* */ - ORL c, R13 \ // y0 = a|c - ADDL h, d \ 
// d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - MOVOU X1, X3 \ - LONG $0xf3720f66; BYTE $0x19 \ // PSLLD XMM3,0x19 /* */ - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - LONG $0xdaeb0f66 \ // POR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL d, R13 \ // y0 = e - MOVL h, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> (25-11) - XORL d, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL e, R15 \ // y2 = f - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVOU X1, X2 \ - LONG $0xd2720f66; BYTE $0x12 \ // PSRLD XMM2,0x12 /* */ - XORL h, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL f, R15 \ // y2 = f^g - MOVOU X1, X8 \ - LONG $0x720f4166; WORD $0x03d0 \ // PSRLD XMM8,0x3 /* XTMP4 = W[-15] >> 3 */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL d, R15 \ // y2 = (f^g)&e - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xf1720f66; BYTE $0x0e \ // PSLLD XMM1,0xe /* */ - XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL f, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd9ef0f66 \ // PXOR XMM3,XMM1 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - LONG $0xdaef0f66 \ // PXOR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ - MOVL h, R13 \ // y0 = a - ADDL R15, g \ // h = h + S1 + CH + k + w - MOVL h, R15 \ // y2 = a - MOVOU X3, X1 \ - LONG $0xef0f4166; BYTE $0xc8 \ // PXOR XMM1,XMM8 /* XTMP1 = s0 */ - ORL b, R13 \ // y0 = a|c - ADDL g, c \ // d = d + h + S1 + CH + k + w - ANDL b, R15 \ // y2 = a&c - \ - \ // compute low s1 - \ - LONG $0xd7700f66; BYTE $0xfa \ // PSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ - ANDL a, R13 \ // y0 = (a|c)&b - ADDL R14, g \ // h = h + S1 + CH + k + w 
+ S0 - LONG $0xc1fe0f66 \ // PADDD XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL c, R13 \ // y0 = e - MOVL g, R14 \ // y1 = a - ROLL $18, R13 \ // y0 = e >> (25-11) - XORL c, R13 \ // y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL d, R15 \ // y2 = f - XORL g, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - MOVOU X2, X8 \ - LONG $0x720f4166; WORD $0x0ad0 \ // PSRLD XMM8,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ - XORL e, R15 \ // y2 = f^g - MOVOU X2, X3 \ - LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ - XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL c, R15 \ // y2 = (f^g)&e - LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - XORL e, R15 \ // y2 = CH = ((f^g)&e)^g - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xef0f4466; BYTE $0xc2 \ // PXOR XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ - MOVL g, R13 \ // y0 = a - ADDL R15, f \ // h = h + S1 + CH + k + w - MOVL g, R15 \ // y2 = a - LONG $0x380f4566; WORD $0xc200 \ // PSHUFB XMM8,XMM10 /* XTMP4 = s1 {00BA} */ - ORL a, R13 \ // y0 = a|c - ADDL f, b \ // d = d + h + S1 + CH + k + w - ANDL a, R15 \ // y2 = a&c - LONG $0xfe0f4166; BYTE $0xc0 \ // PADDD XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ - ANDL h, R13 \ // y0 = (a|c)&b - ADDL R14, f \ // h = h + S1 + CH + k + w + S0 - \ - \ // compute high s1 - \ - LONG $0xd0700f66; BYTE $0x50 \ // PSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] {DDCC} */ - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL 
R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - MOVL b, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL f, R14 \ // y1 = a - ROLL $23, R14 \ // y1 = a >> (22-13) - XORL b, R13 \ // y0 = e ^ (e >> (25-11)) - MOVL c, R15 \ // y2 = f - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - MOVOU X2, X11 \ - LONG $0x720f4166; WORD $0x0ad3 \ // PSRLD XMM11,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ - XORL f, R14 \ // y1 = a ^ (a >> (22-13) - XORL d, R15 \ // y2 = f^g - MOVOU X2, X3 \ - LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ - XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ANDL b, R15 \ // y2 = (f^g)&e - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ - XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL d, R15 \ // y2 = CH = ((f^g)&e)^g - LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL R13, R15 \ // y2 = S1 + CH - ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH - LONG $0xef0f4466; BYTE $0xda \ // PXOR XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ - MOVL f, R13 \ // y0 = a - ADDL R15, e \ // h = h + S1 + CH + k + w - MOVL f, R15 \ // y2 = a - LONG $0x380f4566; WORD $0xdc00 \ // PSHUFB XMM11,XMM12 /* XTMP5 = s1 {DC00} */ - ORL h, R13 \ // y0 = a|c - ADDL e, a \ // d = d + h + S1 + CH + k + w - ANDL h, R15 \ // y2 = a&c - MOVOU X11, X4 \ - LONG $0xe0fe0f66 \ // PADDD XMM4,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ - ANDL g, R13 \ // y0 = (a|c)&b - ADDL R14, e \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ - \ // ROTATE_ARGS - ROTATE_XS - -#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ - MOVL e, R13 \ // y0 = e - ROLL $18, R13 \ // y0 = e >> (25-11) - MOVL a, R14 \ // y1 = a - XORL e, R13 \ 
// y0 = e ^ (e >> (25-11)) - ROLL $23, R14 \ // y1 = a >> (22-13) - MOVL f, R15 \ // y2 = f - XORL a, R14 \ // y1 = a ^ (a >> (22-13) - ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) - XORL g, R15 \ // y2 = f^g - XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) - ANDL e, R15 \ // y2 = (f^g)&e - XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - XORL g, R15 \ // y2 = CH = ((f^g)&e)^g - ADDL R13, R15 \ // y2 = S1 + CH - ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH - MOVL a, R13 \ // y0 = a - ADDL R15, h \ // h = h + S1 + CH + k + w - MOVL a, R15 \ // y2 = a - ORL c, R13 \ // y0 = a|c - ADDL h, d \ // d = d + h + S1 + CH + k + w - ANDL c, R15 \ // y2 = a&c - ANDL b, R13 \ // y0 = (a|c)&b - ADDL R14, h \ // h = h + S1 + CH + k + w + S0 - ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) - ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ - -// func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) -TEXT ·blockSsse(SB), 7, $0-80 - - MOVQ h+0(FP), SI // SI: &h - MOVQ message_base+24(FP), R8 // &message - MOVQ message_len+32(FP), R9 // length of message - CMPQ R9, $0 - JEQ done_hash - ADDQ R8, R9 - MOVQ R9, reserved2+64(FP) // store end of message - - // Register definition - // a --> eax - // b --> ebx - // c --> ecx - // d --> r8d - // e --> edx - // f --> r9d - // g --> r10d - // h --> r11d - // - // y0 --> r13d - // y1 --> r14d - // y2 --> r15d - - MOVL (0*4)(SI), AX // a = H0 - MOVL (1*4)(SI), BX // b = H1 - MOVL (2*4)(SI), CX // c = H2 - MOVL (3*4)(SI), R8 // d = H3 - MOVL (4*4)(SI), DX // e = H4 - MOVL (5*4)(SI), R9 // f = H5 - MOVL (6*4)(SI), R10 // g = H6 - MOVL (7*4)(SI), R11 // h = H7 - - MOVOU bflipMask<>(SB), X13 - MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA - MOVOU shufDC00<>(SB), X12 // shuffle xDxC -> DC00 - - MOVQ 
message_base+24(FP), SI // SI: &message - -loop0: - LEAQ constants<>(SB), BP - - // byte swap first 16 dwords - MOVOU 0*16(SI), X4 - LONG $0x380f4166; WORD $0xe500 // PSHUFB XMM4, XMM13 - MOVOU 1*16(SI), X5 - LONG $0x380f4166; WORD $0xed00 // PSHUFB XMM5, XMM13 - MOVOU 2*16(SI), X6 - LONG $0x380f4166; WORD $0xf500 // PSHUFB XMM6, XMM13 - MOVOU 3*16(SI), X7 - LONG $0x380f4166; WORD $0xfd00 // PSHUFB XMM7, XMM13 - - MOVQ SI, reserved3+72(FP) - MOVD $0x3, DI - - // Align - // nop WORD PTR [rax+rax*1+0x0] - - // schedule 48 input dwords, by doing 3 rounds of 16 each -loop1: - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant to first part of message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x204d // PADDD XMM9, 32[RBP] /* Add 3rd constant to message */ - MOVOU X9, reserved0+48(FP) - FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) - - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x304d // PADDD XMM9, 48[RBP] /* Add 4th constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $64, BP - FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) - - SUBQ $1, DI - JNE loop1 - - MOVD $0x2, DI - -loop2: - MOVOU X4, X9 - LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant to first part of message */ - MOVOU X9, reserved0+48(FP) - DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) - DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) - DO_ROUND(R10, R11, AX, BX, CX, R8, DX, R9, 56) - DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) - - MOVOU X5, X9 - LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ - MOVOU X9, reserved0+48(FP) - ADDQ $32, BP - DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) - DO_ROUND( R8, 
DX, R9, R10, R11, AX, BX, CX, 52) - DO_ROUND( CX, R8, DX, R9, R10, R11, AX, BX, 56) - DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) - - MOVOU X6, X4 - MOVOU X7, X5 - - SUBQ $1, DI - JNE loop2 - - MOVQ h+0(FP), SI // SI: &h - ADDL (0*4)(SI), AX // H0 = a + H0 - MOVL AX, (0*4)(SI) - ADDL (1*4)(SI), BX // H1 = b + H1 - MOVL BX, (1*4)(SI) - ADDL (2*4)(SI), CX // H2 = c + H2 - MOVL CX, (2*4)(SI) - ADDL (3*4)(SI), R8 // H3 = d + H3 - MOVL R8, (3*4)(SI) - ADDL (4*4)(SI), DX // H4 = e + H4 - MOVL DX, (4*4)(SI) - ADDL (5*4)(SI), R9 // H5 = f + H5 - MOVL R9, (5*4)(SI) - ADDL (6*4)(SI), R10 // H6 = g + H6 - MOVL R10, (6*4)(SI) - ADDL (7*4)(SI), R11 // H7 = h + H7 - MOVL R11, (7*4)(SI) - - MOVQ reserved3+72(FP), SI - ADDQ $64, SI - CMPQ reserved2+64(FP), SI - JNE loop0 - -done_hash: - RET - -// Constants table -DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 -DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf -DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b -DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 -DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 -DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be -DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 -DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 -DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 -DATA constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 -DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f -DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc -DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 -DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 -DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 -DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 -DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 -DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc -DATA constants<>+0x90(SB)/8, $0x766a0abb650a7354 -DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e -DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 -DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 -DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 -DATA constants<>+0xb8(SB)/8, 
$0x106aa070f40e3585 -DATA constants<>+0xc0(SB)/8, $0x1e376c0819a4c116 -DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c -DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f -DATA constants<>+0xe0(SB)/8, $0x78a5636f748f82ee -DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 -DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa -DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 - -DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 -DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b - -DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 -DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 - -GLOBL constants<>(SB), 8, $256 -GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 -GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 -GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/mantle/vendor/github.com/minio/sha256-simd/sha256block_amd64.go deleted file mode 100644 index 1c4d97f0..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256block_amd64.go +++ /dev/null @@ -1,53 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sha256 - -func blockArmGo(dig *digest, p []byte) {} - -func blockAvxGo(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockAvx(h[:], p[:], 0, 0, 0, 0) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] -} - -func blockAvx2Go(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockAvx2(h[:], p[:]) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] -} - -func blockSsseGo(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockSsse(h[:], p[:], 0, 0, 0, 0) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] -} - -func blockShaGo(dig *digest, p []byte) { - - blockSha(&dig.h, p) -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.go deleted file mode 100644 index 0979c20a..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.go +++ /dev/null @@ -1,37 +0,0 @@ -//+build !noasm,!appengine - -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -func blockAvx2Go(dig *digest, p []byte) {} -func blockAvxGo(dig *digest, p []byte) {} -func blockSsseGo(dig *digest, p []byte) {} -func blockShaGo(dig *digest, p []byte) {} - -//go:noescape -func blockArm(h []uint32, message []uint8) - -func blockArmGo(dig *digest, p []byte) { - - h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} - - blockArm(h[:], p[:]) - - dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], - h[5], h[6], h[7] -} diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.s deleted file mode 100644 index c6ddb371..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256block_arm64.s +++ /dev/null @@ -1,192 +0,0 @@ -//+build !noasm,!appengine - -// ARM64 version of SHA256 - -// -// Minio Cloud Storage, (C) 2016 Minio, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -// -// Based on implementation as found in https://github.com/jocover/sha256-armv8 -// -// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to -// their Plan9 equivalents -// - -TEXT ·blockArm(SB), 7, $0 - MOVD h+0(FP), R0 - MOVD message+24(FP), R1 - MOVD message_len+32(FP), R2 // length of message - SUBS $64, R2 - BMI complete - - // Load constants table pointer - MOVD $·constants(SB), R3 - - // Cache constants table in registers v16 - v31 - WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64 - WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16 - WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64 - - WORD $0x4c407801 // ld1 {v1.4s}, [x0] - WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64 - WORD $0xd1004000 // sub x0, x0, #0x10 - WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64 - -loop: - // Main loop - WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64 - WORD $0x4ea01c02 // mov v2.16b, v0.16b - WORD $0x4ea11c23 // mov v3.16b, v1.16b - WORD $0x6e2008a5 // rev32 v5.16b, v5.16b - WORD $0x6e2008c6 // rev32 v6.16b, v6.16b - WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s - WORD $0x6e2008e7 // rev32 v7.16b, v7.16b - WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x6e200908 // rev32 v8.16b, v8.16b - WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4eb484a9 // add 
v9.4s, v5.4s, v20.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s - WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s - WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD 
$0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e282907 // sha256su0 v7.4s, v8.4s - WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s - WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s - WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s - WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s - WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e094062 // sha256h q2, q3, v9.4s - WORD $0x5e095083 // sha256h2 q3, q4, v9.4s - WORD $0x4ea21c44 // mov v4.16b, v2.16b - WORD $0x5e0a4062 // sha256h q2, q3, v10.4s - WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s - WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s - WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s - - SUBS $64, R2 - BPL loop - - // Store result - WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0] - -complete: - RET - -// Constants table -DATA ·constants+0x0(SB)/8, $0x71374491428a2f98 -DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf -DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b -DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4 -DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98 -DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be -DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74 -DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7 -DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1 -DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6 -DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f -DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc -DATA 
·constants+0x60(SB)/8, $0xa831c66d983e5152 -DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8 -DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3 -DATA ·constants+0x78(SB)/8, $0x1429296706ca6351 -DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85 -DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc -DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354 -DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e -DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1 -DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70 -DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819 -DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585 -DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116 -DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c -DATA ·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 -DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f -DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee -DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814 -DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa -DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7 - -GLOBL ·constants(SB), 8, $256 - diff --git a/mantle/vendor/github.com/minio/sha256-simd/sha256block_other.go b/mantle/vendor/github.com/minio/sha256-simd/sha256block_other.go deleted file mode 100644 index 0187c950..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/sha256block_other.go +++ /dev/null @@ -1,25 +0,0 @@ -//+build appengine noasm !amd64,!arm64 - -/* - * Minio Cloud Storage, (C) 2019 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sha256 - -func blockAvx2Go(dig *digest, p []byte) {} -func blockAvxGo(dig *digest, p []byte) {} -func blockSsseGo(dig *digest, p []byte) {} -func blockShaGo(dig *digest, p []byte) {} -func blockArmGo(dig *digest, p []byte) {} diff --git a/mantle/vendor/github.com/minio/sha256-simd/test-architectures.sh b/mantle/vendor/github.com/minio/sha256-simd/test-architectures.sh deleted file mode 100644 index 50150eaa..00000000 --- a/mantle/vendor/github.com/minio/sha256-simd/test-architectures.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -set -e - -go tool dist list | while IFS=/ read os arch; do - echo "Checking $os/$arch..." - echo " normal" - GOARCH=$arch GOOS=$os go build -o /dev/null ./... - echo " noasm" - GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./... - echo " appengine" - GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./... - echo " noasm,appengine" - GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./... -done diff --git a/mantle/vendor/github.com/mitchellh/go-homedir/go.mod b/mantle/vendor/github.com/mitchellh/go-homedir/go.mod deleted file mode 100644 index 7efa09a0..00000000 --- a/mantle/vendor/github.com/mitchellh/go-homedir/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/mitchellh/go-homedir diff --git a/mantle/vendor/github.com/pborman/uuid/go.mod b/mantle/vendor/github.com/pborman/uuid/go.mod deleted file mode 100644 index 099fc7de..00000000 --- a/mantle/vendor/github.com/pborman/uuid/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/pborman/uuid - -require github.com/google/uuid v1.0.0 diff --git a/mantle/vendor/github.com/pborman/uuid/go.sum b/mantle/vendor/github.com/pborman/uuid/go.sum deleted file mode 100644 index db2574a9..00000000 --- a/mantle/vendor/github.com/pborman/uuid/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
diff --git a/mantle/vendor/github.com/rs/xid/.appveyor.yml b/mantle/vendor/github.com/rs/xid/.appveyor.yml deleted file mode 100644 index c73bb33b..00000000 --- a/mantle/vendor/github.com/rs/xid/.appveyor.yml +++ /dev/null @@ -1,27 +0,0 @@ -version: 1.0.0.{build} - -platform: x64 - -branches: - only: - - master - -clone_folder: c:\gopath\src\github.com\rs\xid - -environment: - GOPATH: c:\gopath - -install: - - echo %PATH% - - echo %GOPATH% - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - - go get -t . - -build_script: - - go build - -test_script: - - go test - diff --git a/mantle/vendor/github.com/rs/xid/.travis.yml b/mantle/vendor/github.com/rs/xid/.travis.yml deleted file mode 100644 index b37da159..00000000 --- a/mantle/vendor/github.com/rs/xid/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: -- "1.9" -- "1.10" -- "master" -matrix: - allow_failures: - - go: "master" diff --git a/mantle/vendor/github.com/rs/xid/LICENSE b/mantle/vendor/github.com/rs/xid/LICENSE deleted file mode 100644 index 47c5e9d2..00000000 --- a/mantle/vendor/github.com/rs/xid/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Olivier Poitrey - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/mantle/vendor/github.com/rs/xid/README.md b/mantle/vendor/github.com/rs/xid/README.md deleted file mode 100644 index 1f886fd7..00000000 --- a/mantle/vendor/github.com/rs/xid/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# Globally Unique ID Generator - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid) - -Package xid is a globally unique id generator library, ready to be used safely directly in your server code. - -Xid is using Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string: -https://docs.mongodb.org/manual/reference/object-id/ - -- 4-byte value representing the seconds since the Unix epoch, -- 3-byte machine identifier, -- 2-byte process id, and -- 3-byte counter, starting with a random value. - -The binary representation of the id is compatible with Mongo 12 bytes Object IDs. -The string representation is using base32 hex (w/o padding) for better space efficiency -when stored in that form (20 bytes). The hex variant of base32 is used to retain the -sortable property of the id. - -Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an -issue when transported as a string between various systems. 
Base36 wasn't retained either -because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) -and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, -all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). - -UUIDs are 16 bytes (128 bits) and 36 chars as string representation. Twitter Snowflake -ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central -generator servers. xid stands in between with 12 bytes (96 bits) and a more compact -URL-safe string representation (20 chars). No configuration or central generator server -is required so it can be used directly in server's code. - -| Name | Binary Size | String Size | Features -|-------------|-------------|----------------|---------------- -| [UUID] | 16 bytes | 36 chars | configuration free, not sortable -| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable -| [Snowflake] | 8 bytes | up to 20 chars | needs machin/DC configuration, needs central server, sortable -| [MongoID] | 12 bytes | 24 chars | configuration free, sortable -| xid | 12 bytes | 20 chars | configuration free, sortable - -[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier -[shortuuid]: https://github.com/stochastic-technologies/shortuuid -[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake -[MongoID]: https://docs.mongodb.org/manual/reference/object-id/ - -Features: - -- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake -- Base32 hex encoded by default (20 chars when transported as printable string, still sortable) -- Non configured, you don't need set a unique machine and/or data center id -- K-ordered -- Embedded time with 1 second precision -- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process -- Lock-free (i.e.: unlike UUIDv1 and v2) - -Best used with [zerolog](https://github.com/rs/zerolog)'s 
-[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler). - -Notes: - -- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most of the other UUID like implementations are also not cryptographically secure. You shoud use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator. - -References: - -- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems -- https://en.wikipedia.org/wiki/Universally_unique_identifier -- https://blog.twitter.com/2010/announcing-snowflake -- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid -- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride - -## Install - - go get github.com/rs/xid - -## Usage - -```go -guid := xid.New() - -println(guid.String()) -// Output: 9m4e2mr0ui3e8a215n4g -``` - -Get `xid` embedded info: - -```go -guid.Machine() -guid.Pid() -guid.Time() -guid.Counter() -``` - -## Benchmark - -Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid). - -``` -BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op -BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op -BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op -BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op -BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op -BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op -BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op -BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op -BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op -``` - -Note: UUIDv1 requires a global lock, hence the performence degrading as we add more CPUs. 
- -## Licenses - -All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE). diff --git a/mantle/vendor/github.com/rs/xid/go.mod b/mantle/vendor/github.com/rs/xid/go.mod deleted file mode 100644 index 95b83386..00000000 --- a/mantle/vendor/github.com/rs/xid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/rs/xid diff --git a/mantle/vendor/github.com/rs/xid/hostid_darwin.go b/mantle/vendor/github.com/rs/xid/hostid_darwin.go deleted file mode 100644 index 08351ff7..00000000 --- a/mantle/vendor/github.com/rs/xid/hostid_darwin.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin - -package xid - -import "syscall" - -func readPlatformMachineID() (string, error) { - return syscall.Sysctl("kern.uuid") -} diff --git a/mantle/vendor/github.com/rs/xid/hostid_fallback.go b/mantle/vendor/github.com/rs/xid/hostid_fallback.go deleted file mode 100644 index 7fbd3c00..00000000 --- a/mantle/vendor/github.com/rs/xid/hostid_fallback.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !darwin,!linux,!freebsd,!windows - -package xid - -import "errors" - -func readPlatformMachineID() (string, error) { - return "", errors.New("not implemented") -} diff --git a/mantle/vendor/github.com/rs/xid/hostid_freebsd.go b/mantle/vendor/github.com/rs/xid/hostid_freebsd.go deleted file mode 100644 index be25a039..00000000 --- a/mantle/vendor/github.com/rs/xid/hostid_freebsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build freebsd - -package xid - -import "syscall" - -func readPlatformMachineID() (string, error) { - return syscall.Sysctl("kern.hostuuid") -} diff --git a/mantle/vendor/github.com/rs/xid/hostid_linux.go b/mantle/vendor/github.com/rs/xid/hostid_linux.go deleted file mode 100644 index 7d0c4a9e..00000000 --- a/mantle/vendor/github.com/rs/xid/hostid_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux - -package xid - -import "io/ioutil" - -func readPlatformMachineID() (string, error) { - b, err := ioutil.ReadFile("/sys/class/dmi/id/product_uuid") - return 
string(b), err -} diff --git a/mantle/vendor/github.com/rs/xid/hostid_windows.go b/mantle/vendor/github.com/rs/xid/hostid_windows.go deleted file mode 100644 index ec2593ee..00000000 --- a/mantle/vendor/github.com/rs/xid/hostid_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build windows - -package xid - -import ( - "fmt" - "syscall" - "unsafe" -) - -func readPlatformMachineID() (string, error) { - // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go - var h syscall.Handle - err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) - if err != nil { - return "", err - } - defer syscall.RegCloseKey(h) - - const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 - const uuidLen = 36 - - var regBuf [syscallRegBufLen]uint16 - bufLen := uint32(syscallRegBufLen) - var valType uint32 - err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) - if err != nil { - return "", err - } - - hostID := syscall.UTF16ToString(regBuf[:]) - hostIDLen := len(hostID) - if hostIDLen != uuidLen { - return "", fmt.Errorf("HostID incorrect: %q\n", hostID) - } - - return hostID, nil -} diff --git a/mantle/vendor/github.com/rs/xid/id.go b/mantle/vendor/github.com/rs/xid/id.go deleted file mode 100644 index 466faf26..00000000 --- a/mantle/vendor/github.com/rs/xid/id.go +++ /dev/null @@ -1,365 +0,0 @@ -// Package xid is a globally unique id generator suited for web scale -// -// Xid is using Mongo Object ID algorithm to generate globally unique ids: -// https://docs.mongodb.org/manual/reference/object-id/ -// -// - 4-byte value representing the seconds since the Unix epoch, -// - 3-byte machine identifier, -// - 2-byte process id, and -// - 3-byte counter, starting with a random value. 
-// -// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. -// The string representation is using base32 hex (w/o padding) for better space efficiency -// when stored in that form (20 bytes). The hex variant of base32 is used to retain the -// sortable property of the id. -// -// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an -// issue when transported as a string between various systems. Base36 wasn't retained either -// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) -// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, -// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). -// -// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between -// with 12 bytes with a more compact string representation ready for the web and no -// required configuration or central generation server. -// -// Features: -// -// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake -// - Base32 hex encoded by default (16 bytes storage when transported as printable string) -// - Non configured, you don't need set a unique machine and/or data center id -// - K-ordered -// - Embedded time with 1 second precision -// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process -// -// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). 
-// -// References: -// -// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems -// - https://en.wikipedia.org/wiki/Universally_unique_identifier -// - https://blog.twitter.com/2010/announcing-snowflake -package xid - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io/ioutil" - "os" - "sort" - "sync/atomic" - "time" -) - -// Code inspired from mgo/bson ObjectId - -// ID represents a unique request id -type ID [rawLen]byte - -const ( - encodedLen = 20 // string encoded len - rawLen = 12 // binary raw len - - // encoding stores a custom version of the base32 encoding with lower case - // letters. - encoding = "0123456789abcdefghijklmnopqrstuv" -) - -var ( - // ErrInvalidID is returned when trying to unmarshal an invalid ID - ErrInvalidID = errors.New("xid: invalid ID") - - // objectIDCounter is atomically incremented when generating a new ObjectId - // using NewObjectId() function. It's used as a counter part of an id. - // This id is initialized with a random value. - objectIDCounter = randInt() - - // machineId stores machine id generated once and used in subsequent calls - // to NewObjectId function. - machineID = readMachineID() - - // pid stores the current process id - pid = os.Getpid() - - nilID ID - - // dec is the decoding map for base32 encoding - dec [256]byte -) - -func init() { - for i := 0; i < len(dec); i++ { - dec[i] = 0xFF - } - for i := 0; i < len(encoding); i++ { - dec[encoding[i]] = byte(i) - } - - // If /proc/self/cpuset exists and is not /, we can assume that we are in a - // form of container and use the content of cpuset xor-ed with the PID in - // order get a reasonable machine global unique PID. - b, err := ioutil.ReadFile("/proc/self/cpuset") - if err == nil && len(b) > 1 { - pid ^= int(crc32.ChecksumIEEE(b)) - } -} - -// readMachineId generates machine id and puts it into the machineId global -// variable. 
If this function fails to get the hostname, it will cause -// a runtime error. -func readMachineID() []byte { - id := make([]byte, 3) - hid, err := readPlatformMachineID() - if err != nil || len(hid) == 0 { - hid, err = os.Hostname() - } - if err == nil && len(hid) != 0 { - hw := md5.New() - hw.Write([]byte(hid)) - copy(id, hw.Sum(nil)) - } else { - // Fallback to rand number if machine id can't be gathered - if _, randErr := rand.Reader.Read(id); randErr != nil { - panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) - } - } - return id -} - -// randInt generates a random uint32 -func randInt() uint32 { - b := make([]byte, 3) - if _, err := rand.Reader.Read(b); err != nil { - panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) - } - return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) -} - -// New generates a globally unique ID -func New() ID { - return NewWithTime(time.Now()) -} - -// NewWithTime generates a globally unique ID with the passed in time -func NewWithTime(t time.Time) ID { - var id ID - // Timestamp, 4 bytes, big endian - binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) - // Machine, first 3 bytes of md5(hostname) - id[4] = machineID[0] - id[5] = machineID[1] - id[6] = machineID[2] - // Pid, 2 bytes, specs don't specify endianness, but we use big endian. - id[7] = byte(pid >> 8) - id[8] = byte(pid) - // Increment, 3 bytes, big endian - i := atomic.AddUint32(&objectIDCounter, 1) - id[9] = byte(i >> 16) - id[10] = byte(i >> 8) - id[11] = byte(i) - return id -} - -// FromString reads an ID from its string representation -func FromString(id string) (ID, error) { - i := &ID{} - err := i.UnmarshalText([]byte(id)) - return *i, err -} - -// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). 
-func (id ID) String() string { - text := make([]byte, encodedLen) - encode(text, id[:]) - return string(text) -} - -// MarshalText implements encoding/text TextMarshaler interface -func (id ID) MarshalText() ([]byte, error) { - text := make([]byte, encodedLen) - encode(text, id[:]) - return text, nil -} - -// MarshalJSON implements encoding/json Marshaler interface -func (id ID) MarshalJSON() ([]byte, error) { - if id.IsNil() { - return []byte("null"), nil - } - text, err := id.MarshalText() - return []byte(`"` + string(text) + `"`), err -} - -// encode by unrolling the stdlib base32 algorithm + removing all safe checks -func encode(dst, id []byte) { - dst[0] = encoding[id[0]>>3] - dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F] - dst[2] = encoding[(id[1]>>1)&0x1F] - dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F] - dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] - dst[5] = encoding[(id[3]>>2)&0x1F] - dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] - dst[7] = encoding[id[4]&0x1F] - dst[8] = encoding[id[5]>>3] - dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F] - dst[10] = encoding[(id[6]>>1)&0x1F] - dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F] - dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] - dst[13] = encoding[(id[8]>>2)&0x1F] - dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] - dst[15] = encoding[id[9]&0x1F] - dst[16] = encoding[id[10]>>3] - dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] - dst[18] = encoding[(id[11]>>1)&0x1F] - dst[19] = encoding[(id[11]<<4)&0x1F] -} - -// UnmarshalText implements encoding/text TextUnmarshaler interface -func (id *ID) UnmarshalText(text []byte) error { - if len(text) != encodedLen { - return ErrInvalidID - } - for _, c := range text { - if dec[c] == 0xFF { - return ErrInvalidID - } - } - decode(id, text) - return nil -} - -// UnmarshalJSON implements encoding/json Unmarshaler interface -func (id *ID) UnmarshalJSON(b []byte) error { - s := string(b) - if s == "null" { - *id = nilID - return nil - } - return 
id.UnmarshalText(b[1 : len(b)-1]) -} - -// decode by unrolling the stdlib base32 algorithm + removing all safe checks -func decode(id *ID, src []byte) { - id[0] = dec[src[0]]<<3 | dec[src[1]]>>2 - id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4 - id[2] = dec[src[3]]<<4 | dec[src[4]]>>1 - id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3 - id[4] = dec[src[6]]<<5 | dec[src[7]] - id[5] = dec[src[8]]<<3 | dec[src[9]]>>2 - id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4 - id[7] = dec[src[11]]<<4 | dec[src[12]]>>1 - id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3 - id[9] = dec[src[14]]<<5 | dec[src[15]] - id[10] = dec[src[16]]<<3 | dec[src[17]]>>2 - id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4 -} - -// Time returns the timestamp part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ID) Time() time.Time { - // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. - secs := int64(binary.BigEndian.Uint32(id[0:4])) - return time.Unix(secs, 0) -} - -// Machine returns the 3-byte machine id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ID) Machine() []byte { - return id[4:7] -} - -// Pid returns the process id part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ID) Pid() uint16 { - return binary.BigEndian.Uint16(id[7:9]) -} - -// Counter returns the incrementing value part of the id. -// It's a runtime error to call this method with an invalid id. -func (id ID) Counter() int32 { - b := id[9:12] - // Counter is stored as big-endian 3-byte value - return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) -} - -// Value implements the driver.Valuer interface. -func (id ID) Value() (driver.Value, error) { - if id.IsNil() { - return nil, nil - } - b, err := id.MarshalText() - return string(b), err -} - -// Scan implements the sql.Scanner interface. 
-func (id *ID) Scan(value interface{}) (err error) { - switch val := value.(type) { - case string: - return id.UnmarshalText([]byte(val)) - case []byte: - return id.UnmarshalText(val) - case nil: - *id = nilID - return nil - default: - return fmt.Errorf("xid: scanning unsupported type: %T", value) - } -} - -// IsNil Returns true if this is a "nil" ID -func (id ID) IsNil() bool { - return id == nilID -} - -// NilID returns a zero value for `xid.ID`. -func NilID() ID { - return nilID -} - -// Bytes returns the byte array representation of `ID` -func (id ID) Bytes() []byte { - return id[:] -} - -// FromBytes convert the byte array representation of `ID` back to `ID` -func FromBytes(b []byte) (ID, error) { - var id ID - if len(b) != rawLen { - return id, ErrInvalidID - } - copy(id[:], b) - return id, nil -} - -// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`. -// The result will be 0 if two IDs are identical, -1 if current id is less than the other one, -// and 1 if current id is greater than the other. -func (id ID) Compare(other ID) int { - return bytes.Compare(id[:], other[:]) -} - -type sorter []ID - -func (s sorter) Len() int { - return len(s) -} - -func (s sorter) Less(i, j int) bool { - return s[i].Compare(s[j]) < 0 -} - -func (s sorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Sort sorts an array of IDs inplace. -// It works by wrapping `[]ID` and use `sort.Sort`. 
-func Sort(ids []ID) { - sort.Sort(sorter(ids)) -} diff --git a/mantle/vendor/github.com/sirupsen/logrus/README.md b/mantle/vendor/github.com/sirupsen/logrus/README.md index 5152b6aa..b042c896 100644 --- a/mantle/vendor/github.com/sirupsen/logrus/README.md +++ b/mantle/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. @@ -341,7 +341,7 @@ import ( log "github.com/sirupsen/logrus" ) -init() { +func init() { // do something here to set environment depending on an environment variable // or command-line flag if Environment == "production" { diff --git a/mantle/vendor/github.com/sirupsen/logrus/buffer_pool.go b/mantle/vendor/github.com/sirupsen/logrus/buffer_pool.go index 4545dec0..c7787f77 100644 --- a/mantle/vendor/github.com/sirupsen/logrus/buffer_pool.go +++ b/mantle/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -26,15 +26,6 @@ func (p *defaultPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } -func getBuffer() *bytes.Buffer { - return bufferPool.Get() -} - -func putBuffer(buf *bytes.Buffer) { - buf.Reset() - bufferPool.Put(buf) -} - // SetBufferPool allows to replace the default logrus buffer pool // to better meets the specific needs of an application. 
func SetBufferPool(bp BufferPool) { diff --git a/mantle/vendor/github.com/sirupsen/logrus/entry.go b/mantle/vendor/github.com/sirupsen/logrus/entry.go index 07a1e5fa..71cdbbc3 100644 --- a/mantle/vendor/github.com/sirupsen/logrus/entry.go +++ b/mantle/vendor/github.com/sirupsen/logrus/entry.go @@ -232,6 +232,7 @@ func (entry *Entry) log(level Level, msg string) { newEntry.Logger.mu.Lock() reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() newEntry.Logger.mu.Unlock() if reportCaller { @@ -239,11 +240,11 @@ func (entry *Entry) log(level Level, msg string) { } newEntry.fireHooks() - - buffer = getBuffer() + buffer = bufPool.Get() defer func() { newEntry.Buffer = nil - putBuffer(buffer) + buffer.Reset() + bufPool.Put(buffer) }() buffer.Reset() newEntry.Buffer = buffer @@ -260,6 +261,13 @@ func (entry *Entry) log(level Level, msg string) { } } +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + func (entry *Entry) fireHooks() { var tmpHooks LevelHooks entry.Logger.mu.Lock() @@ -276,18 +284,21 @@ func (entry *Entry) fireHooks() { } func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. 
func (entry *Entry) Log(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.log(level, fmt.Sprint(args...)) diff --git a/mantle/vendor/github.com/sirupsen/logrus/go.mod b/mantle/vendor/github.com/sirupsen/logrus/go.mod deleted file mode 100644 index b3919d5e..00000000 --- a/mantle/vendor/github.com/sirupsen/logrus/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/sirupsen/logrus - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 -) - -go 1.13 diff --git a/mantle/vendor/github.com/sirupsen/logrus/go.sum b/mantle/vendor/github.com/sirupsen/logrus/go.sum deleted file mode 100644 index 694c18b8..00000000 --- a/mantle/vendor/github.com/sirupsen/logrus/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/mantle/vendor/github.com/sirupsen/logrus/logger.go b/mantle/vendor/github.com/sirupsen/logrus/logger.go index 33770445..5ff0aef6 100644 --- a/mantle/vendor/github.com/sirupsen/logrus/logger.go +++ b/mantle/vendor/github.com/sirupsen/logrus/logger.go @@ -44,6 +44,9 @@ type Logger struct { entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` 
ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool } type exitFunc func(int) @@ -192,6 +195,9 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) } +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() @@ -402,3 +408,10 @@ func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Unlock() return oldHooks } + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/mantle/vendor/github.com/spf13/cobra/.gitignore b/mantle/vendor/github.com/spf13/cobra/.gitignore index b2b848e7..c7b459e4 100644 --- a/mantle/vendor/github.com/spf13/cobra/.gitignore +++ b/mantle/vendor/github.com/spf13/cobra/.gitignore @@ -32,8 +32,8 @@ Session.vim tags *.exe -cobra cobra.test +bin .idea/ *.iml diff --git a/mantle/vendor/github.com/spf13/cobra/.golangci.yml b/mantle/vendor/github.com/spf13/cobra/.golangci.yml new file mode 100644 index 00000000..0d6e6179 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/.golangci.yml @@ -0,0 +1,48 @@ +run: + deadline: 5m + +linters: + disable-all: true + enable: + #- bodyclose + - deadcode + #- depguard + #- dogsled + #- dupl + - errcheck + #- exhaustive + #- funlen + - gas + #- gochecknoinits + - goconst + #- gocritic + #- gocyclo + #- gofmt + - goimports + - golint + #- gomnd + #- goprintffuncname + #- gosec + #- gosimple + - govet + - ineffassign + - interfacer + #- lll + - maligned + - megacheck + #- misspell + #- nakedret + #- noctx + #- nolintlint + 
#- rowserrcheck + #- scopelint + #- staticcheck + - structcheck + #- stylecheck + #- typecheck + - unconvert + #- unparam + #- unused + - varcheck + #- whitespace + fast: false diff --git a/mantle/vendor/github.com/spf13/cobra/.travis.yml b/mantle/vendor/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index fca1e694..00000000 --- a/mantle/vendor/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -stages: - - diff - - test - -go: - - 1.10.x - - 1.11.x - - 1.12.x - - tip - -matrix: - allow_failures: - - go: tip - include: - - stage: diff - go: 1.12.x - script: diff -u <(echo -n) <(gofmt -d -s .) - -before_install: go get -u github.com/kyoh86/richgo - -script: - - richgo test -v ./... - - go build - - if [ -z $NOVET ]; then - diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); - fi diff --git a/mantle/vendor/github.com/spf13/cobra/CONDUCT.md b/mantle/vendor/github.com/spf13/cobra/CONDUCT.md new file mode 100644 index 00000000..9d16f88f --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/CONDUCT.md @@ -0,0 +1,37 @@ +## Cobra User Contract + +### Versioning +Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. + +### Backward Compatibility +We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. 
+ +### Deprecation +Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the change of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github. + +### CVE +Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. + +### Communication +Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. + +### Breaking Changes +Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. + +There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. + +Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These version have consumers who expect the APIs, behaviors, etc, to remain stable during the lifetime of the patch stream for the minor release. + +Examples of breaking changes include: +- Removing or renaming exported constant, variable, type, or function. +- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc... + - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing. + +There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging. 
+ +### CI Testing +Maintainers will ensure the Cobra test suite utilizes the current supported versions of Golang. + +### Disclaimer +Changes to this document and the contents therein are at the discretion of the maintainers. +None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/mantle/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/mantle/vendor/github.com/spf13/cobra/CONTRIBUTING.md new file mode 100644 index 00000000..6f356e6a --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# Contributing to Cobra + +Thank you so much for contributing to Cobra. We appreciate your time and help. +Here are some guidelines to help you get started. + +## Code of Conduct + +Be kind and respectful to the members of the community. Take time to educate +others who are seeking help. Harassment of any kind will not be tolerated. + +## Questions + +If you have questions regarding Cobra, feel free to ask it in the community +[#cobra Slack channel][cobra-slack] + +## Filing a bug or feature + +1. Before filing an issue, please check the existing issues to see if a + similar one was already opened. If there is one already opened, feel free + to comment on it. +1. If you believe you've found a bug, please provide detailed steps of + reproduction, the version of Cobra and anything else you believe will be + useful to help troubleshoot it (e.g. OS environment, environment variables, + etc...). Also state the current behavior vs. the expected behavior. +1. If you'd like to see a feature or an enhancement please open an issue with + a clear title and description of what the feature is and why it would be + beneficial to the project and its users. + +## Submitting changes + +1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to + sign a CLA. Please sign the CLA :slightly_smiling_face: +1. 
Tests: If you are submitting code, please ensure you have adequate tests + for the feature. Tests can be run via `go test ./...` or `make test`. +1. Since this is golang project, ensure the new code is properly formatted to + ensure code consistency. Run `make all`. + +### Quick steps to contribute + +1. Fork the project. +1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) +1. Create your feature branch (`git checkout -b my-new-feature`) +1. Make changes and run tests (`make test`) +1. Add them to staging (`git add .`) +1. Commit your changes (`git commit -m 'Add some feature'`) +1. Push to the branch (`git push origin my-new-feature`) +1. Create new pull request + + +[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199 diff --git a/mantle/vendor/github.com/spf13/cobra/MAINTAINERS b/mantle/vendor/github.com/spf13/cobra/MAINTAINERS new file mode 100644 index 00000000..4c5ac3dd --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/MAINTAINERS @@ -0,0 +1,13 @@ +maintainers: +- spf13 +- johnSchnake +- jpmcb +- marckhouzam +inactive: +- anthonyfok +- bep +- bogem +- broady +- eparis +- jharshman +- wfernandes diff --git a/mantle/vendor/github.com/spf13/cobra/Makefile b/mantle/vendor/github.com/spf13/cobra/Makefile new file mode 100644 index 00000000..443ef1a9 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/Makefile @@ -0,0 +1,35 @@ +BIN="./bin" +SRC=$(shell find . 
-name "*.go") + +ifeq (, $(shell which golangci-lint)) +$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") +endif + +ifeq (, $(shell which richgo)) +$(warning "could not find richgo in $(PATH), run: go get github.com/kyoh86/richgo") +endif + +.PHONY: fmt lint test install_deps clean + +default: all + +all: fmt test + +fmt: + $(info ******************** checking formatting ********************) + @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) + +lint: + $(info ******************** running lint tools ********************) + golangci-lint run -v + +test: install_deps + $(info ******************** running tests ********************) + richgo test -v ./... + +install_deps: + $(info ******************** downloading dependencies ********************) + go get -v ./... + +clean: + rm -rf $(BIN) diff --git a/mantle/vendor/github.com/spf13/cobra/README.md b/mantle/vendor/github.com/spf13/cobra/README.md index 2f8175bc..2bf15208 100644 --- a/mantle/vendor/github.com/spf13/cobra/README.md +++ b/mantle/vendor/github.com/spf13/cobra/README.md @@ -1,82 +1,34 @@ ![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) -Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
- -Many of the most widely used Go projects are built using Cobra, such as: -[Kubernetes](http://kubernetes.io/), -[Hugo](http://gohugo.io), -[rkt](https://github.com/coreos/rkt), -[etcd](https://github.com/coreos/etcd), -[Moby (former Docker)](https://github.com/moby/moby), -[Docker (distribution)](https://github.com/docker/distribution), -[OpenShift](https://www.openshift.com/), -[Delve](https://github.com/derekparker/delve), -[GopherJS](http://www.gopherjs.org/), -[CockroachDB](http://www.cockroachlabs.com/), -[Bleve](http://www.blevesearch.com/), -[ProjectAtomic (enterprise)](http://www.projectatomic.io/), -[Giant Swarm's gsctl](https://github.com/giantswarm/gsctl), -[Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack), -[rclone](http://rclone.org/), -[nehm](https://github.com/bogem/nehm), -[Pouch](https://github.com/alibaba/pouch), -[Istio](https://istio.io), -[Prototool](https://github.com/uber/prototool), -[mattermost-server](https://github.com/mattermost/mattermost-server), -[Gardener](https://github.com/gardener/gardenctl), -[Linkerd](https://linkerd.io/), -etc. - -[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) -[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) -[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) +Cobra is a library for creating powerful modern CLI applications. + +Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), +[Hugo](https://gohugo.io), and [Github CLI](https://github.com/cli/cli) to +name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra. 
-# Table of Contents - -- [Overview](#overview) -- [Concepts](#concepts) - * [Commands](#commands) - * [Flags](#flags) -- [Installing](#installing) -- [Getting Started](#getting-started) - * [Using the Cobra Generator](#using-the-cobra-generator) - * [Using the Cobra Library](#using-the-cobra-library) - * [Working with Flags](#working-with-flags) - * [Positional and Custom Arguments](#positional-and-custom-arguments) - * [Example](#example) - * [Help Command](#help-command) - * [Usage Message](#usage-message) - * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) - * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens) - * [Generating documentation for your command](#generating-documentation-for-your-command) - * [Generating bash completions](#generating-bash-completions) - * [Generating zsh completions](#generating-zsh-completions) -- [Contributing](#contributing) -- [License](#license) +[![](https://img.shields.io/github/workflow/status/spf13/cobra/Test?longCache=tru&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) +[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) +[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) # Overview Cobra is a library providing a simple interface to create powerful modern CLI interfaces similar to git & go tools. -Cobra is also an application that will generate your application scaffolding to rapidly -develop a Cobra-based application. - Cobra provides: * Easy subcommand-based CLIs: `app server`, `app fetch`, etc. 
* Fully POSIX-compliant flags (including short & long versions) * Nested subcommands * Global, local and cascading flags -* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` * Intelligent suggestions (`app srver`... did you mean `app server`?) * Automatic help generation for commands and flags * Automatic help flag recognition of `-h`, `--help`, etc. -* Automatically generated bash autocomplete for your application +* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell) * Automatically generated man pages for your application * Command aliases so you can change things without breaking them * The flexibility to define your own help, usage, etc. -* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps +* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps # Concepts @@ -84,8 +36,8 @@ Cobra is built on a structure of commands, arguments & flags. **Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. -The best applications will read like sentences when used. Users will know how -to use the application because they will natively understand how to use it. +The best applications read like sentences when used, and as a result, users +intuitively know how to interact with them. The pattern to follow is `APPNAME VERB NOUN --ADJECTIVE.` @@ -110,7 +62,7 @@ have children commands and optionally run an action. In the example above, 'server' is the command. -[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) +[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command) ## Flags @@ -127,643 +79,32 @@ which maintains the same interface while adding POSIX compliance. # Installing Using Cobra is easy. First, use `go get` to install the latest version -of the library. 
This command will install the `cobra` generator executable -along with the library and its dependencies: - - go get -u github.com/spf13/cobra/cobra - -Next, include Cobra in your application: - -```go -import "github.com/spf13/cobra" -``` - -# Getting Started - -While you are welcome to provide your own organization, typically a Cobra-based -application will follow the following organizational structure: - -``` - ▾ appName/ - ▾ cmd/ - add.go - your.go - commands.go - here.go - main.go -``` - -In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -## Using the Cobra Generator - -Cobra provides its own program that will create your application and add any -commands you want. It's the easiest way to incorporate Cobra into your application. - -[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. - -## Using the Cobra Library - -To manually implement Cobra you need to create a bare main.go file and a rootCmd file. -You will optionally provide additional commands as you see fit. - -### Create rootCmd - -Cobra doesn't require any special constructors. Simply create your commands. - -Ideally you place this in app/cmd/root.go: - -```go -var rootCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, -} - -func Execute() { - if err := rootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } -} -``` - -You will additionally define flags and handle configuration in your init() function. 
- -For example cmd/root.go: - -```go -package cmd - -import ( - "fmt" - "os" - - homedir "github.com/mitchellh/go-homedir" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -var ( - // Used for flags. - cfgFile string - userLicense string - - rootCmd = &cobra.Command{ - Use: "cobra", - Short: "A generator for Cobra based Applications", - Long: `Cobra is a CLI library for Go that empowers applications. -This application is a tool to generate the needed files -to quickly create a Cobra application.`, - } -) - -// Execute executes the root command. -func Execute() error { - return rootCmd.Execute() -} - -func init() { - cobra.OnInitialize(initConfig) - - rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") - rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") - rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) - viper.SetDefault("author", "NAME HERE ") - viper.SetDefault("license", "apache") - - rootCmd.AddCommand(addCmd) - rootCmd.AddCommand(initCmd) -} - -func er(msg interface{}) { - fmt.Println("Error:", msg) - os.Exit(1) -} - -func initConfig() { - if cfgFile != "" { - // Use config file from the flag. - viper.SetConfigFile(cfgFile) - } else { - // Find home directory. - home, err := homedir.Dir() - if err != nil { - er(err) - } - - // Search config in home directory with name ".cobra" (without extension). 
- viper.AddConfigPath(home) - viper.SetConfigName(".cobra") - } - - viper.AutomaticEnv() - - if err := viper.ReadInConfig(); err == nil { - fmt.Println("Using config file:", viper.ConfigFileUsed()) - } -} -``` - -### Create your main.go - -With the root command you need to have your main function execute it. -Execute should be run on the root for clarity, though it can be called on any command. - -In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. - -```go -package main - -import ( - "{pathToYourApp}/cmd" -) - -func main() { - cmd.Execute() -} -``` - -### Create additional commands - -Additional commands can be defined and typically are each given their own file -inside of the cmd/ directory. - -If you wanted to create a version command you would create cmd/version.go and -populate it with the following: - -```go -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func init() { - rootCmd.AddCommand(versionCmd) -} - -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, -} -``` - -## Working with Flags - -Flags provide modifiers to control how the action command operates. - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - -```go -var Verbose bool -var Source string -``` - -There are two different approaches to assign a flag. - -### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags, assign a flag as a persistent flag on the root. 
- -```go -rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") -``` - -### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - -```go -localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") -``` - -### Local Flag on Parent Commands - -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will -parse local flags on each command before executing the target command. - -```go -command := cobra.Command{ - Use: "print [OPTIONS] [COMMANDS]", - TraverseChildren: true, -} -``` - -### Bind Flags with Config - -You can also bind your flags with [viper](https://github.com/spf13/viper): -```go -var author string - -func init() { - rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) -} -``` - -In this example the persistent flag `author` is bound with `viper`. -**Note**, that the variable `author` will not be set to the value from config, -when the `--author` flag is not provided by user. +of the library. -More in [viper documentation](https://github.com/spf13/viper#working-with-flags). - -### Required flags - -Flags are optional by default. If instead you wish your command to report an error -when a flag has not been set, mark it as required: -```go -rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") -rootCmd.MarkFlagRequired("region") ``` - -## Positional and Custom Arguments - -Validation of positional arguments can be specified using the `Args` field -of `Command`. - -The following validators are built in: - -- `NoArgs` - the command will report an error if there are any positional args. -- `ArbitraryArgs` - the command will accept any args. 
-- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. -- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. -- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. -- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. -- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` -- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. - -An example of setting the custom validator: - -```go -var cmd = &cobra.Command{ - Short: "hello", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("requires a color argument") - } - if myapp.IsValidColor(args[0]) { - return nil - } - return fmt.Errorf("invalid color specified: %s", args[0]) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hello, World!") - }, -} +go get -u github.com/spf13/cobra@latest ``` -## Example - -In the example below, we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - -```go -package main - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -func main() { - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. 
-For many years people have printed back to the screen.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. -Echo works a lot like print, except it has a child command.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Echo: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing -a count and a string.`, - Args: cobra.MinimumNArgs(1), - Run: func(cmd *cobra.Command, args []string) { - for i := 0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() -} -``` - -For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). - -## Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally, help will also -support all other commands as input. Say, for instance, you have a command called -'create' without any additional configuration; Cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by Cobra. Nothing beyond the -command and flag definitions are needed. - - $ cobra help - - Cobra is a CLI library for Go that empowers applications. 
- This application is a tool to generate the needed files - to quickly create a Cobra application. - - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. - - -Help is just a command like any other. There is no special logic or behavior -around it. In fact, you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or your own template for the default command to use -with following functions: - -```go -cmd.SetHelpCommand(cmd *Command) -cmd.SetHelpFunc(f func(*Command, []string)) -cmd.SetHelpTemplate(s string) -``` - -The latter two will also apply to any children commands. - -## Usage Message - -When the user provides an invalid flag or invalid command, Cobra responds by -showing the user the 'usage'. - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of its output. - - $ cobra --invalid - Error: unknown flag: --invalid - Usage: - cobra [command] - - Available Commands: - add Add a command to a Cobra Application - help Help about any command - init Initialize a Cobra Application - - Flags: - -a, --author string author name for copyright attribution (default "YOUR NAME") - --config string config file (default is $HOME/.cobra.yaml) - -h, --help help for cobra - -l, --license string name of license for the project - --viper use Viper for configuration (default true) - - Use "cobra [command] --help" for more information about a command. 
- -### Defining your own usage -You can provide your own usage function or template for Cobra to use. -Like help, the function and template are overridable through public methods: - -```go -cmd.SetUsageFunc(f func(*Command) error) -cmd.SetUsageTemplate(s string) -``` - -## Version Flag - -Cobra adds a top-level '--version' flag if the Version field is set on the root command. -Running an application with the '--version' flag will print the version to stdout using -the version template. The template can be customized using the -`cmd.SetVersionTemplate(s string)` function. - -## PreRun and PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistentPostRun` - -An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My subcommand", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - rootCmd.Execute() - fmt.Println() - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - rootCmd.Execute() -} -``` - -Output: -``` -Inside rootCmd PersistentPreRun with args: [] -Inside rootCmd PreRun with args: [] -Inside rootCmd Run with args: [] -Inside rootCmd PostRun with args: [] -Inside rootCmd PersistentPostRun with args: [] - -Inside rootCmd PersistentPreRun with args: [arg1 arg2] -Inside subCmd PreRun with args: 
[arg1 arg2] -Inside subCmd Run with args: [arg1 arg2] -Inside subCmd PostRun with args: [arg1 arg2] -Inside subCmd PersistentPostRun with args: [arg1 arg2] -``` - -## Suggestions when "unknown command" happens - -Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: - -``` -$ hugo srever -Error: unknown command "srever" for "hugo" - -Did you mean this? - server - -Run 'hugo --help' for usage. -``` - -Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. - -If you need to disable suggestions or tweak the string distance in your command, use: +Next, include Cobra in your application: ```go -command.DisableSuggestions = true +import "github.com/spf13/cobra" ``` -or - -```go -command.SuggestionsMinimumDistance = 1 -``` +# Usage +`cobra-cli` is a command line program to generate cobra applications and command files. +It will bootstrap your application scaffolding to rapidly +develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application. -You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: +It can be installed by running: ``` -$ kubectl remove -Error: unknown command "remove" for "kubectl" - -Did you mean this? - delete - -Run 'kubectl help' for usage. +go install github.com/spf13/cobra-cli@latest ``` -## Generating documentation for your command - -Cobra can generate documentation based on subcommands, flags, etc. 
in the following formats: - -- [Markdown](doc/md_docs.md) -- [ReStructured Text](doc/rest_docs.md) -- [Man Page](doc/man_docs.md) - -## Generating bash completions - -Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). - -## Generating zsh completions - -Cobra can generate zsh-completion file. Read more about it in -[Zsh Completions](zsh_completions.md). - -# Contributing +For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -1. Fork it -2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) -3. Create your feature branch (`git checkout -b my-new-feature`) -4. Make changes and add them (`git add .`) -5. Commit your changes (`git commit -m 'Add some feature'`) -6. Push to the branch (`git push origin my-new-feature`) -7. Create new pull request +For complete details on using the Cobra library, please read the [The Cobra User Guide](user_guide.md). # License diff --git a/mantle/vendor/github.com/spf13/cobra/active_help.go b/mantle/vendor/github.com/spf13/cobra/active_help.go new file mode 100644 index 00000000..0c631913 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/active_help.go @@ -0,0 +1,49 @@ +package cobra + +import ( + "fmt" + "os" + "strings" +) + +const ( + activeHelpMarker = "_activeHelp_ " + // The below values should not be changed: programs will be using them explicitly + // in their user documentation, and users will be using them explicitly. + activeHelpEnvVarSuffix = "_ACTIVE_HELP" + activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpGlobalDisable = "0" +) + +// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. 
+// Such strings will be processed by the completion script and will be shown as ActiveHelp +// to the user. +// The array parameter should be the array that will contain the completions. +// This function can be called multiple times before and/or after completions are added to +// the array. Each time this function is called with the same array, the new +// ActiveHelp line will be shown below the previous ones when completion is triggered. +func AppendActiveHelp(compArray []string, activeHelpStr string) []string { + return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) +} + +// GetActiveHelpConfig returns the value of the ActiveHelp environment variable +// _ACTIVE_HELP where is the name of the root command in upper +// case, with all - replaced by _. +// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP +// is set to "0". +func GetActiveHelpConfig(cmd *Command) string { + activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar) + if activeHelpCfg != activeHelpGlobalDisable { + activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name())) + } + return activeHelpCfg +} + +// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment +// variable. It has the format _ACTIVE_HELP where is the name of the +// root command in upper case, with all - replaced by _. +func activeHelpEnvVar(name string) string { + // This format should not be changed: users will be using it explicitly. 
+ activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) + return strings.ReplaceAll(activeHelpEnvVar, "-", "_") +} diff --git a/mantle/vendor/github.com/spf13/cobra/active_help.md b/mantle/vendor/github.com/spf13/cobra/active_help.md new file mode 100644 index 00000000..5e7f59af --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/active_help.md @@ -0,0 +1,157 @@ +# Active Help + +Active Help is a framework provided by Cobra which allows a program to define messages (hints, warnings, etc) that will be printed during program usage. It aims to make it easier for your users to learn how to use your program. If configured by the program, Active Help is printed when the user triggers shell completion. + +For example, +``` +bash-5.1$ helm repo add [tab] +You must choose a name for the repo you are adding. + +bash-5.1$ bin/helm package [tab] +Please specify the path to the chart to package + +bash-5.1$ bin/helm package [tab][tab] +bin/ internal/ scripts/ pkg/ testdata/ +``` + +**Hint**: A good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions to guide the user in knowing what is expected by the program. +## Supported shells + +Active Help is currently only supported for the following shells: +- Bash (using [bash completion V2](shell_completions.md#bash-completion-v2) only). Note that bash 4.4 or higher is required for the prompt to appear when an Active Help message is printed. +- Zsh + +## Adding Active Help messages + +As Active Help uses the shell completion system, the implementation of Active Help messages is done by enhancing custom dynamic completions. If you are not familiar with dynamic completions, please refer to [Shell Completions](shell_completions.md). 
+ +Adding Active Help is done through the use of the `cobra.AppendActiveHelp(...)` function, where the program repeatedly adds Active Help messages to the list of completions. Keep reading for details. + +### Active Help for nouns + +Adding Active Help when completing a noun is done within the `ValidArgsFunction(...)` of a command. Please notice the use of `cobra.AppendActiveHelp(...)` in the following example: + +```go +cmd := &cobra.Command{ + Use: "add [NAME] [URL]", + Short: "add a chart repository", + Args: require.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return addRepo(args) + }, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var comps []string + if len(args) == 0 { + comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") + } else if len(args) == 1 { + comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") + } else { + comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") + } + return comps, cobra.ShellCompDirectiveNoFileComp + }, +} +``` +The example above defines the completions (none, in this specific example) as well as the Active Help messages for the `helm repo add` command. It yields the following behavior: +``` +bash-5.1$ helm repo add [tab] +You must choose a name for the repo you are adding + +bash-5.1$ helm repo add grafana [tab] +You must specify the URL for the repo you are adding + +bash-5.1$ helm repo add grafana https://grafana.github.io/helm-charts [tab] +This command does not take any more arguments +``` +**Hint**: As can be seen in the above example, a good place to use Active Help messages is when the normal completion system does not provide any suggestions. In such cases, Active Help nicely supplements the normal shell completions. 
+ +### Active Help for flags + +Providing Active Help for flags is done in the same fashion as for nouns, but using the completion function registered for the flag. For example: +```go +_ = cmd.RegisterFlagCompletionFunc("version", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if len(args) != 2 { + return cobra.AppendActiveHelp(nil, "You must first specify the chart to install before the --version flag can be completed"), cobra.ShellCompDirectiveNoFileComp + } + return compVersionFlag(args[1], toComplete) + }) +``` +The example above prints an Active Help message when not enough information was given by the user to complete the `--version` flag. +``` +bash-5.1$ bin/helm install myrelease --version 2.0.[tab] +You must first specify the chart to install before the --version flag can be completed + +bash-5.1$ bin/helm install myrelease bitnami/solr --version 2.0.[tab][tab] +2.0.1 2.0.2 2.0.3 +``` + +## User control of Active Help + +You may want to allow your users to disable Active Help or choose between different levels of Active Help. It is entirely up to the program to define the type of configurability of Active Help that it wants to offer, if any. +Allowing to configure Active Help is entirely optional; you can use Active Help in your program without doing anything about Active Help configuration. + +The way to configure Active Help is to use the program's Active Help environment +variable. That variable is named `_ACTIVE_HELP` where `` is the name of your +program in uppercase with any `-` replaced by an `_`. The variable should be set by the user to whatever +Active Help configuration values are supported by the program. + +For example, say `helm` has chosen to support three levels for Active Help: `on`, `off`, `local`. Then a user +would set the desired behavior to `local` by doing `export HELM_ACTIVE_HELP=local` in their shell. 
+ +For simplicity, when in `cmd.ValidArgsFunction(...)` or a flag's completion function, the program should read the +Active Help configuration using the `cobra.GetActiveHelpConfig(cmd)` function and select what Active Help messages +should or should not be added (instead of reading the environment variable directly). + +For example: +```go +ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + activeHelpLevel := cobra.GetActiveHelpConfig(cmd) + + var comps []string + if len(args) == 0 { + if activeHelpLevel != "off" { + comps = cobra.AppendActiveHelp(comps, "You must choose a name for the repo you are adding") + } + } else if len(args) == 1 { + if activeHelpLevel != "off" { + comps = cobra.AppendActiveHelp(comps, "You must specify the URL for the repo you are adding") + } + } else { + if activeHelpLevel == "local" { + comps = cobra.AppendActiveHelp(comps, "This command does not take any more arguments") + } + } + return comps, cobra.ShellCompDirectiveNoFileComp +}, +``` +**Note 1**: If the `_ACTIVE_HELP` environment variable is set to the string "0", Cobra will automatically disable all Active Help output (even if some output was specified by the program using the `cobra.AppendActiveHelp(...)` function). Using "0" can simplify your code in situations where you want to blindly disable Active Help without having to call `cobra.GetActiveHelpConfig(cmd)` explicitly. + +**Note 2**: If a user wants to disable Active Help for every single program based on Cobra, she can set the environment variable `COBRA_ACTIVE_HELP` to "0". In this case `cobra.GetActiveHelpConfig(cmd)` will return "0" no matter what the variable `_ACTIVE_HELP` is set to. + +**Note 3**: If the user does not set `_ACTIVE_HELP` or `COBRA_ACTIVE_HELP` (which will be a common case), the default value for the Active Help configuration returned by `cobra.GetActiveHelpConfig(cmd)` will be the empty string. 
+## Active Help with Cobra's default completion command + +Cobra provides a default `completion` command for programs that wish to use it. +When using the default `completion` command, Active Help is configurable in the same +fashion as described above using environment variables. You may wish to document this in more +details for your users. + +## Debugging Active Help + +Debugging your Active Help code is done in the same way as debugging your dynamic completion code, which is with Cobra's hidden `__complete` command. Please refer to [debugging shell completion](shell_completions.md#debugging) for details. + +When debugging with the `__complete` command, if you want to specify different Active Help configurations, you should use the active help environment variable. That variable is named `_ACTIVE_HELP` where any `-` is replaced by an `_`. For example, we can test deactivating some Active Help as shown below: +``` +$ HELM_ACTIVE_HELP=1 bin/helm __complete install wordpress bitnami/h +bitnami/haproxy +bitnami/harbor +_activeHelp_ WARNING: cannot re-use a name that is still in use +:0 +Completion ended with directive: ShellCompDirectiveDefault + +$ HELM_ACTIVE_HELP=0 bin/helm __complete install wordpress bitnami/h +bitnami/haproxy +bitnami/harbor +:0 +Completion ended with directive: ShellCompDirectiveDefault +``` diff --git a/mantle/vendor/github.com/spf13/cobra/args.go b/mantle/vendor/github.com/spf13/cobra/args.go index c4d820b8..20a022b3 100644 --- a/mantle/vendor/github.com/spf13/cobra/args.go +++ b/mantle/vendor/github.com/spf13/cobra/args.go @@ -2,6 +2,7 @@ package cobra import ( "fmt" + "strings" ) type PositionalArgs func(cmd *Command, args []string) error @@ -34,8 +35,15 @@ func NoArgs(cmd *Command, args []string) error { // OnlyValidArgs returns an error if any args are not in the list of ValidArgs. func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { + // Remove any description that may be included in ValidArgs. 
+ // A description is following a tab character. + var validArgs []string + for _, v := range cmd.ValidArgs { + validArgs = append(validArgs, strings.Split(v, "\t")[0]) + } + for _, v := range args { - if !stringInSlice(v, cmd.ValidArgs) { + if !stringInSlice(v, validArgs) { return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) } } @@ -99,3 +107,15 @@ func RangeArgs(min int, max int) PositionalArgs { return nil } } + +// MatchAll allows combining several PositionalArgs to work in concert. +func MatchAll(pargs ...PositionalArgs) PositionalArgs { + return func(cmd *Command, args []string) error { + for _, parg := range pargs { + if err := parg(cmd, args); err != nil { + return err + } + } + return nil + } +} diff --git a/mantle/vendor/github.com/spf13/cobra/bash_completions.go b/mantle/vendor/github.com/spf13/cobra/bash_completions.go index 1e0e25cf..cb7e1953 100644 --- a/mantle/vendor/github.com/spf13/cobra/bash_completions.go +++ b/mantle/vendor/github.com/spf13/cobra/bash_completions.go @@ -19,12 +19,12 @@ const ( BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" ) -func writePreamble(buf *bytes.Buffer, name string) { - buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(fmt.Sprintf(` +func writePreamble(buf io.StringWriter, name string) { + WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` __%[1]s_debug() { - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" fi } @@ -58,6 +58,100 @@ __%[1]s_contains_word() return 1 } +__%[1]s_handle_go_custom_completion() +{ + __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" + + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d 
+ local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + local out requestComp lastParam lastChar comp directive args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + args=("${words[@]:1}") + # Disable ActiveHelp which is not supported for bash completion v1 + requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi + + __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" + __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. 
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no space" + compopt -o nospace + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" + compopt +o default + fi + fi + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + # Do not use quotes around the $out variable or else newline + # characters will be kept. + for filter in ${out}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subdir + # Use printf to strip any trailing newline + subdir=$(printf "%%s" "${out}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + __%[1]s_handle_subdirs_in_dir_flag "$subdir" + else + __%[1]s_debug "Listing directories in ." + _filedir -d + fi + else + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${out}" -- "$cur") + fi +} + __%[1]s_handle_reply() { __%[1]s_debug "${FUNCNAME[0]}" @@ -94,13 +188,19 @@ __%[1]s_handle_reply() PREFIX="" cur="${cur#*=}" ${flags_completion[${index}]} - if [ -n "${ZSH_VERSION}" ]; then + if [ -n "${ZSH_VERSION:-}" ]; then # zsh completion needs --flag= prefix eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" fi fi fi - return 0; + + if [[ -z "${flag_parsing_disabled}" ]]; then + # If flag parsing is enabled, we have completed the flags and can return. 
+ # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough + # to possibly call handle_go_custom_completion. + return 0; + fi ;; esac @@ -120,7 +220,10 @@ __%[1]s_handle_reply() local completions completions=("${commands[@]}") if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") + completions+=("${must_have_one_noun[@]}") + elif [[ -n "${has_completion_function}" ]]; then + # if a go completion function is provided, defer to that function + __%[1]s_handle_go_custom_completion fi if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then completions+=("${must_have_one_flag[@]}") @@ -136,13 +239,13 @@ __%[1]s_handle_reply() fi if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - if declare -F __%[1]s_custom_func >/dev/null; then - # try command name qualified custom func - __%[1]s_custom_func - else - # otherwise fall back to unqualified for compatibility - declare -F __custom_func >/dev/null && __custom_func - fi + if declare -F __%[1]s_custom_func >/dev/null; then + # try command name qualified custom func + __%[1]s_custom_func + else + # otherwise fall back to unqualified for compatibility + declare -F __custom_func >/dev/null && __custom_func + fi fi # available in bash-completion >= 2, not always present on macOS @@ -176,7 +279,7 @@ __%[1]s_handle_flag() # if a command required a flag, and we found it, unset must_have_one_flag() local flagname=${words[c]} - local flagvalue + local flagvalue="" # if the word contained an = if [[ ${words[c]} == *"="* ]]; then flagvalue=${flagname#*=} # take in as flagvalue after the = @@ -195,7 +298,7 @@ __%[1]s_handle_flag() # keep flag value with flagname as flaghash # flaghash variable is an associative array which is only supported in bash > 3. 
- if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then if [ -n "${flagvalue}" ] ; then flaghash[${flagname}]=${flagvalue} elif [ -n "${words[ $((c+1)) ]}" ] ; then @@ -207,7 +310,7 @@ __%[1]s_handle_flag() # skip the argument to a two word flag if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then - __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" + __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" c=$((c+1)) # if we are looking for a flags value, don't show commands if [[ $c -eq $cword ]]; then @@ -267,7 +370,7 @@ __%[1]s_handle_word() __%[1]s_handle_command elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then # aliashash variable is an associative array which is only supported in bash > 3. - if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then words[c]=${aliashash[${words[c]}]} __%[1]s_handle_command else @@ -279,14 +382,16 @@ __%[1]s_handle_word() __%[1]s_handle_word } -`, name)) +`, name, ShellCompNoDescRequestCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) } -func writePostscript(buf *bytes.Buffer, name string) { - name = strings.Replace(name, ":", "__", -1) - buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) - buf.WriteString(fmt.Sprintf(`{ - local cur prev words cword +func writePostscript(buf io.StringWriter, name string) { + name = strings.ReplaceAll(name, ":", "__") + WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(`{ + local cur prev words cword split declare -A flaghash 2>/dev/null || : declare -A aliashash 2>/dev/null || : if declare -F _init_completion >/dev/null 2>&1; then 
@@ -296,48 +401,52 @@ func writePostscript(buf *bytes.Buffer, name string) { fi local c=0 + local flag_parsing_disabled= local flags=() local two_word_flags=() local local_nonpersistent_flags=() local flags_with_completion=() local flags_completion=() local commands=("%[1]s") + local command_aliases=() local must_have_one_flag=() local must_have_one_noun=() - local last_command + local has_completion_function="" + local last_command="" local nouns=() + local noun_aliases=() __%[1]s_handle_word } `, name)) - buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then complete -o default -F __start_%s %s else complete -o default -o nospace -F __start_%s %s fi `, name, name, name, name)) - buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") + WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") } -func writeCommands(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" commands=()\n") +func writeCommands(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " commands=()\n") for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { + if !c.IsAvailableCommand() && c != cmd.helpCommand { continue } - buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) writeCmdAliases(buf, c) } - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { +func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { for key, value := range annotations { switch key { case BashCompFilenameExt: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) > 0 { @@ -345,17 +454,18 @@ func 
writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) case BashCompCustom: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + if len(value) > 0 { handlers := strings.Join(value, "; ") - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) } else { - buf.WriteString(" flags_completion+=(:)\n") + WriteStringAndCheck(buf, " flags_completion+=(:)\n") } case BashCompSubdirsInDir: - buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) var ext string if len(value) == 1 { @@ -363,55 +473,81 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } else { ext = "_filedir -d" } - buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) } } } -func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +const cbn = "\")\n" + +func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Shorthand format := " " if len(flag.NoOptDefVal) == 0 { format += "two_word_" } - format += "flags+=(\"-%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format += "flags+=(\"-%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) } -func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { +func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { name := flag.Name format := " flags+=(\"--%s" if len(flag.NoOptDefVal) == 0 { format += "=" } - format += "\")\n" - 
buf.WriteString(fmt.Sprintf(format, name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) if len(flag.NoOptDefVal) == 0 { - format = " two_word_flags+=(\"--%s\")\n" - buf.WriteString(fmt.Sprintf(format, name)) + format = " two_word_flags+=(\"--%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) } writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) } -func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { +func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { name := flag.Name - format := " local_nonpersistent_flags+=(\"--%s" + format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn if len(flag.NoOptDefVal) == 0 { - format += "=" + format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn + } + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) + } +} + +// Setup annotations for go completions for registered flags +func prepareCustomAnnotationsForFlags(cmd *Command) { + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + for flag := range flagCompletionFunctions { + // Make sure the completion script calls the __*_go_custom_completion function for + // every registered flag. 
We need to do this here (and not when the flag was registered + // for completion) so that we can know the root command name for the prefix + // of ___go_custom_completion + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, name)) } -func writeFlags(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(` flags=() +func writeFlags(buf io.StringWriter, cmd *Command) { + prepareCustomAnnotationsForFlags(cmd) + WriteStringAndCheck(buf, ` flags=() two_word_flags=() local_nonpersistent_flags=() flags_with_completion=() flags_completion=() `) + + if cmd.DisableFlagParsing { + WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") + } + localNonPersistentFlags := cmd.LocalNonPersistentFlags() cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -421,7 +557,9 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { if len(flag.Shorthand) > 0 { writeShortFlag(buf, flag, cmd) } - if localNonPersistentFlags.Lookup(flag.Name) != nil { + // localNonPersistentFlags are used to stop the completion of subcommands when one is set + // if TraverseChildren is true we should allow to complete subcommands + if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren { writeLocalNonPersistentFlag(buf, flag) } }) @@ -435,11 +573,11 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { } }) - buf.WriteString("\n") + WriteStringAndCheck(buf, "\n") } -func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_flag=()\n") +func writeRequiredFlag(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_flag=()\n") flags := cmd.NonInheritedFlags() flags.VisitAll(func(flag *pflag.Flag) { if nonCompletableFlag(flag) { @@ -452,76 +590,82 @@ func writeRequiredFlag(buf *bytes.Buffer, cmd 
*Command) { if flag.Value.Type() != "bool" { format += "=" } - format += "\")\n" - buf.WriteString(fmt.Sprintf(format, flag.Name)) + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) if len(flag.Shorthand) > 0 { - buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } } }) } -func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) +func writeRequiredNouns(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_noun=()\n") + sort.Strings(cmd.ValidArgs) for _, value := range cmd.ValidArgs { - buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + // Remove any description that may be included following a tab character. + // Descriptions are not supported by bash completion. + value = strings.Split(value, "\t")[0] + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + } + if cmd.ValidArgsFunction != nil { + WriteStringAndCheck(buf, " has_completion_function=1\n") } } -func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { +func writeCmdAliases(buf io.StringWriter, cmd *Command) { if len(cmd.Aliases) == 0 { return } - sort.Sort(sort.StringSlice(cmd.Aliases)) + sort.Strings(cmd.Aliases) - buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) for _, value := range cmd.Aliases { - buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) - buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) } - buf.WriteString(` fi`) - 
buf.WriteString("\n") + WriteStringAndCheck(buf, ` fi`) + WriteStringAndCheck(buf, "\n") } -func writeArgAliases(buf *bytes.Buffer, cmd *Command) { - buf.WriteString(" noun_aliases=()\n") - sort.Sort(sort.StringSlice(cmd.ArgAliases)) +func writeArgAliases(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " noun_aliases=()\n") + sort.Strings(cmd.ArgAliases) for _, value := range cmd.ArgAliases { - buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) } } -func gen(buf *bytes.Buffer, cmd *Command) { +func gen(buf io.StringWriter, cmd *Command) { for _, c := range cmd.Commands() { - if !c.IsAvailableCommand() || c == cmd.helpCommand { + if !c.IsAvailableCommand() && c != cmd.helpCommand { continue } gen(buf, c) } commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - commandName = strings.Replace(commandName, ":", "__", -1) + commandName = strings.ReplaceAll(commandName, " ", "_") + commandName = strings.ReplaceAll(commandName, ":", "__") if cmd.Root() == cmd { - buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) } else { - buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) } - buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) - buf.WriteString("\n") - buf.WriteString(" command_aliases=()\n") - buf.WriteString("\n") + WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) + WriteStringAndCheck(buf, "\n") + WriteStringAndCheck(buf, " command_aliases=()\n") + WriteStringAndCheck(buf, "\n") writeCommands(buf, cmd) writeFlags(buf, cmd) writeRequiredFlag(buf, cmd) writeRequiredNouns(buf, cmd) writeArgAliases(buf, cmd) - buf.WriteString("}\n\n") + WriteStringAndCheck(buf, "}\n\n") } // GenBashCompletion generates bash completion file and writes 
to the passed writer. diff --git a/mantle/vendor/github.com/spf13/cobra/bash_completions.md b/mantle/vendor/github.com/spf13/cobra/bash_completions.md index 4ac61ee1..52919b2f 100644 --- a/mantle/vendor/github.com/spf13/cobra/bash_completions.md +++ b/mantle/vendor/github.com/spf13/cobra/bash_completions.md @@ -1,64 +1,16 @@ -# Generating Bash Completions For Your Own cobra.Command +# Generating Bash Completions For Your cobra.Command -If you are using the generator you can create a completion command by running +Please refer to [Shell Completions](shell_completions.md) for details. -```bash -cobra add completion -``` - -Update the help text show how to install the bash_completion Linux show here [Kubectl docs show mac options](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion) - -Writing the shell script to stdout allows the most flexible use. - -```go -// completionCmd represents the completion command -var completionCmd = &cobra.Command{ - Use: "completion", - Short: "Generates bash completion scripts", - Long: `To load completion run - -. <(bitbucket completion) - -To configure your bash shell to load completions for each session add to your bashrc - -# ~/.bashrc or ~/.profile -. <(bitbucket completion) -`, - Run: func(cmd *cobra.Command, args []string) { - rootCmd.GenBashCompletion(os.Stdout); - }, -} -``` - -**Note:** The cobra generator may include messages printed to stdout for example if the config file is loaded, this will break the auto complete script - - -## Example from kubectl - -Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: +## Bash legacy dynamic completions -```go -package main - -import ( - "io/ioutil" - "os" - - "k8s.io/kubernetes/pkg/kubectl/cmd" - "k8s.io/kubernetes/pkg/kubectl/cmd/util" -) +For backward compatibility, Cobra still supports its legacy dynamic completion solution (described below). 
Unlike the `ValidArgsFunction` solution, the legacy solution will only work for Bash shell-completion and not for other shells. This legacy solution can be used along-side `ValidArgsFunction` and `RegisterFlagCompletionFunc()`, as long as both solutions are not used for the same command. This provides a path to gradually migrate from the legacy solution to the new solution. -func main() { - kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") -} -``` +**Note**: Cobra's default `completion` command uses bash completion V2. If you are currently using Cobra's legacy dynamic completion solution, you should not use the default `completion` command but continue using your own. -`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. +The legacy solution allows you to inject bash functions into the bash completion script. Those bash functions are responsible for providing the completion choices for your own completions. -## Creating your own custom functions - -Some more actual code that works in kubernetes: +Some code that works in kubernetes: ```bash const ( @@ -111,108 +63,7 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`___custom_func()`) to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. 
If you type `kubectl get pod [tab][tab]` the `__kubectl_customc_func()` will run because the cobra.Command only understood "kubectl" and "get." `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods! -## Have the completions code complete your 'nouns' - -In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: - -```go -validArgs []string = { "pod", "node", "service", "replicationcontroller" } - -cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", - Short: "Display one or many resources", - Long: get_long, - Example: get_example, - Run: func(cmd *cobra.Command, args []string) { - err := RunGet(f, out, cmd, args) - util.CheckErr(err) - }, - ValidArgs: validArgs, -} -``` - -Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like - -```bash -# kubectl get [tab][tab] -node pod replicationcontroller service -``` - -## Plural form and shortcuts for nouns - -If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: - -```go -argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } - -cmd := &cobra.Command{ - ... - ValidArgs: validArgs, - ArgAliases: argAliases -} -``` - -The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by -the completion algorithm if entered manually, e.g. 
in: - -```bash -# kubectl get rc [tab][tab] -backend frontend database -``` - -Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns -in this example again instead of the replication controllers. - -## Mark flags as required - -Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. - -```go -cmd.MarkFlagRequired("pod") -cmd.MarkFlagRequired("container") -``` - -and you'll get something like - -```bash -# kubectl exec [tab][tab][tab] --c --container= -p --pod= -``` - -# Specify valid filename extensions for flags that take a filename - -In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. - -```go - annotations := []string{"json", "yaml", "yml"} - annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = annotations - - flag := &pflag.Flag{ - Name: "filename", - Shorthand: "f", - Usage: usage, - Value: value, - DefValue: value.String(), - Annotations: annotation, - } - cmd.Flags().AddFlag(flag) -``` - -Now when you run a command with this filename flag you'll get something like - -```bash -# kubectl create -f -test/ example/ rpmbuild/ -hello.yml test.json -``` - -So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. 
- -# Specify custom flag completion - -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify -a custom flag completion function with cobra.BashCompCustom: +Similarly, for flags: ```go annotation := make(map[string][]string) @@ -226,7 +77,7 @@ a custom flag completion function with cobra.BashCompCustom: cmd.Flags().AddFlag(flag) ``` -In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` +In addition add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction` value, e.g.: ```bash @@ -240,17 +91,3 @@ __kubectl_get_namespaces() fi } ``` -# Using bash aliases for commands - -You can also configure the `bash aliases` for the commands and they will also support completions. - -```bash -alias aliasname=origcommand -complete -o default -F __start_origcommand aliasname - -# and now when you run `aliasname` completion will make -# suggestions as it did for `origcommand`. - -$) aliasname -completion firstcommand secondcommand -``` diff --git a/mantle/vendor/github.com/spf13/cobra/bash_completionsV2.go b/mantle/vendor/github.com/spf13/cobra/bash_completionsV2.go new file mode 100644 index 00000000..767bf031 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -0,0 +1,369 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genBashComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genBashComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + + WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Macs have bash3 for which the bash-completion 
package doesn't include +# _init_completion. This is a minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +# This function calls the %[1]s program to obtain the completion +# results and the directive. It fills the 'out' and 'directive' vars. +__%[1]s_get_completion_results() { + local requestComp lastParam lastChar args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows to handle aliases + args=("${words[@]:1}") + requestComp="${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} ''" + fi + + # When completing a flag with an = (e.g., %[1]s -n=) + # bash focuses on the part after the =, so we need to remove + # the flag part from $cur + if [[ "${cur}" == -*=* ]]; then + cur="${cur#*=}" + fi + + __%[1]s_debug "Calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "The completion directive is: ${directive}" + __%[1]s_debug "The completions are: ${out}" +} + +__%[1]s_process_completion_results() { + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local 
shellCompDirectiveFilterDirs=%[7]d + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no space" + compopt -o nospace + else + __%[1]s_debug "No space directive not supported in this version of bash" + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "Activating no file completion" + compopt +o default + else + __%[1]s_debug "No file completion directive not supported in this version of bash" + fi + fi + fi + + # Separate activeHelp from normal completions + local completions=() + local activeHelp=() + __%[1]s_extract_activeHelp + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + + # Do not use quotes around the $completions variable or else newline + # characters will be kept. + for filter in ${completions[*]}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + + # Use printf to strip any trailing newline + local subdir + subdir=$(printf "%%s" "${completions[0]}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return + else + __%[1]s_debug "Listing directories in ." 
+ _filedir -d + fi + else + __%[1]s_handle_completion_types + fi + + __%[1]s_handle_special_char "$cur" : + __%[1]s_handle_special_char "$cur" = + + # Print the activeHelp statements before we finish + if [ ${#activeHelp} -ne 0 ]; then + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" + fi + fi +} + +# Separate activeHelp lines from real completions. +# Fills the $activeHelp and $completions arrays. +__%[1]s_extract_activeHelp() { + local activeHelpMarker="%[8]s" + local endIndex=${#activeHelpMarker} + + while IFS='' read -r comp; do + if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then + comp=${comp:endIndex} + __%[1]s_debug "ActiveHelp found: $comp" + if [ -n "$comp" ]; then + activeHelp+=("$comp") + fi + else + # Not an activeHelp line but a normal completion + completions+=("$comp") + fi + done < <(printf "%%s\n" "${out}") +} + +__%[1]s_handle_completion_types() { + __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" + + case $COMP_TYPE in + 37|42) + # Type: menu-complete/menu-complete-backward and insert-completions + # If the user requested inserting one completion at a time, or all + # completions at once on the command-line we must remove the descriptions. 
+ # https://github.com/spf13/cobra/issues/1508 + local tab=$'\t' comp + while IFS='' read -r comp; do + [[ -z $comp ]] && continue + # Strip any description + comp=${comp%%%%$tab*} + # Only consider the completions that match + if [[ $comp == "$cur"* ]]; then + COMPREPLY+=("$comp") + fi + done < <(printf "%%s\n" "${completions[@]}") + ;; + + *) + # Type: complete (normal completion) + __%[1]s_handle_standard_completion_case + ;; + esac +} + +__%[1]s_handle_standard_completion_case() { + local tab=$'\t' comp + + # Short circuit to optimize if we don't have descriptions + if [[ "${completions[*]}" != *$tab* ]]; then + IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + return 0 + fi + + local longest=0 + local compline + # Look for the longest completion so that we can format things nicely + while IFS='' read -r compline; do + [[ -z $compline ]] && continue + # Strip any description before checking the length + comp=${compline%%%%$tab*} + # Only consider the completions that match + [[ $comp == "$cur"* ]] || continue + COMPREPLY+=("$compline") + if ((${#comp}>longest)); then + longest=${#comp} + fi + done < <(printf "%%s\n" "${completions[@]}") + + # If there is a single completion left, remove the description text + if [ ${#COMPREPLY[*]} -eq 1 ]; then + __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" + comp="${COMPREPLY[0]%%%%$tab*}" + __%[1]s_debug "Removed description from single completion, which is now: ${comp}" + COMPREPLY[0]=$comp + else # Format the descriptions + __%[1]s_format_comp_descriptions $longest + fi +} + +__%[1]s_handle_special_char() +{ + local comp="$1" + local char=$2 + if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then + local word=${comp%%"${comp##*${char}}"} + local idx=${#COMPREPLY[*]} + while [[ $((--idx)) -ge 0 ]]; do + COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"} + done + fi +} + +__%[1]s_format_comp_descriptions() +{ + local tab=$'\t' + local comp desc maxdesclength + local longest=$1 + + 
local i ci + for ci in ${!COMPREPLY[*]}; do + comp=${COMPREPLY[ci]} + # Properly format the description string which follows a tab character if there is one + if [[ "$comp" == *$tab* ]]; then + __%[1]s_debug "Original comp: $comp" + desc=${comp#*$tab} + comp=${comp%%%%$tab*} + + # $COLUMNS stores the current shell width. + # Remove an extra 4 because we add 2 spaces and 2 parentheses. + maxdesclength=$(( COLUMNS - longest - 4 )) + + # Make sure we can fit a description of at least 8 characters + # if we are to align the descriptions. + if [[ $maxdesclength -gt 8 ]]; then + # Add the proper number of spaces to align the descriptions + for ((i = ${#comp} ; i < longest ; i++)); do + comp+=" " + done + else + # Don't pad the descriptions so we can fit more text after the completion + maxdesclength=$(( COLUMNS - ${#comp} - 4 )) + fi + + # If there is enough space for any description text, + # truncate the descriptions that are too long for the shell width + if [ $maxdesclength -gt 0 ]; then + if [ ${#desc} -gt $maxdesclength ]; then + desc=${desc:0:$(( maxdesclength - 1 ))} + desc+="…" + fi + comp+=" ($desc)" + fi + COMPREPLY[ci]=$comp + __%[1]s_debug "Final comp: $comp" + fi + done +} + +__start_%[1]s() +{ + local cur prev words cword split + + COMPREPLY=() + + # Call _init_completion from the bash-completion package + # to prepare the arguments properly + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -n "=:" || return + else + __%[1]s_init_completion -n "=:" || return + fi + + __%[1]s_debug + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $cword location, so we need + # to truncate the command-line ($words) up to the $cword location. 
+ words=("${words[@]:0:$cword+1}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + local out directive + __%[1]s_get_completion_results + __%[1]s_process_completion_results +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%[1]s %[1]s +else + complete -o default -o nospace -F __start_%[1]s %[1]s +fi + +# ex: ts=4 sw=4 et filetype=sh +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + activeHelpMarker)) +} + +// GenBashCompletionFileV2 generates Bash completion version 2. +func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletionV2(outFile, includeDesc) +} + +// GenBashCompletionV2 generates Bash completion file version 2 +// and writes it to the passed writer. +func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { + return c.genBashCompletion(w, includeDesc) +} diff --git a/mantle/vendor/github.com/spf13/cobra/cobra.go b/mantle/vendor/github.com/spf13/cobra/cobra.go index d01becc8..d6cbfd71 100644 --- a/mantle/vendor/github.com/spf13/cobra/cobra.go +++ b/mantle/vendor/github.com/spf13/cobra/cobra.go @@ -19,6 +19,7 @@ package cobra import ( "fmt" "io" + "os" "reflect" "strconv" "strings" @@ -205,3 +206,17 @@ func stringInSlice(a string, list []string) bool { } return false } + +// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. +func CheckErr(msg interface{}) { + if msg != nil { + fmt.Fprintln(os.Stderr, "Error:", msg) + os.Exit(1) + } +} + +// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. 
+func WriteStringAndCheck(b io.StringWriter, s string) { + _, err := b.WriteString(s) + CheckErr(err) +} diff --git a/mantle/vendor/github.com/spf13/cobra/command.go b/mantle/vendor/github.com/spf13/cobra/command.go index fb60ebd9..675bb134 100644 --- a/mantle/vendor/github.com/spf13/cobra/command.go +++ b/mantle/vendor/github.com/spf13/cobra/command.go @@ -29,8 +29,6 @@ import ( flag "github.com/spf13/pflag" ) -var ErrSubCommandRequired = errors.New("subcommand is required") - // FParseErrWhitelist configures Flag parse errors to be ignored type FParseErrWhitelist flag.ParseErrorsWhitelist @@ -40,6 +38,14 @@ type FParseErrWhitelist flag.ParseErrorsWhitelist // definition to ensure usability. type Command struct { // Use is the one-line usage message. + // Recommended syntax is as follow: + // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. + // ... indicates that you can specify multiple values for the previous argument. + // | indicates mutually exclusive information. You can use the argument to the left of the separator or the + // argument to the right of the separator. You cannot use both arguments in a single use of the command. + // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are + // optional, they are enclosed in brackets ([ ]). + // Example: add [-F file | -D dir]... [-f format] profile Use string // Aliases is an array of aliases that can be used instead of the first word in Use. @@ -58,33 +64,36 @@ type Command struct { // Example is examples of how to use the command. Example string - // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions + // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions ValidArgs []string + // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. + // It is a dynamic version of using ValidArgs. 
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command. + ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) // Expected arguments Args PositionalArgs // ArgAliases is List of aliases for ValidArgs. - // These are not suggested to the user in the bash completion, + // These are not suggested to the user in the shell completion, // but accepted if entered manually. ArgAliases []string - // BashCompletionFunction is custom functions used by the bash autocompletion generator. + // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + // For portability with other shells, it is recommended to instead use ValidArgsFunction BashCompletionFunction string // Deprecated defines, if this command is deprecated and should print this string when used. Deprecated string - // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. - Hidden bool - // Annotations are key/value pairs that can be used by applications to identify or // group commands. Annotations map[string]string // Version defines the version for this command. If this value is non-empty and the command does not // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, - // will print content of the "Version" variable. + // will print content of the "Version" variable. A shorthand "v" flag will also be added if the + // command does not define one. Version string // The *Run functions are executed in the following order: @@ -116,55 +125,6 @@ type Command struct { // PersistentPostRunE: PersistentPostRun but returns an error. PersistentPostRunE func(cmd *Command, args []string) error - // SilenceErrors is an option to quiet errors down stream. - SilenceErrors bool - - // SilenceUsage is an option to silence usage when an error occurs. - SilenceUsage bool - - // DisableFlagParsing disables the flag parsing. 
- // If this is true all flags will be passed to the command as arguments. - DisableFlagParsing bool - - // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") - // will be printed by generating docs for this command. - DisableAutoGenTag bool - - // DisableFlagsInUseLine will disable the addition of [flags] to the usage - // line of a command when printing help or generating docs - DisableFlagsInUseLine bool - - // DisableSuggestions disables the suggestions based on Levenshtein distance - // that go along with 'unknown command' messages. - DisableSuggestions bool - // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. - // Must be > 0. - SuggestionsMinimumDistance int - - // TraverseChildren parses flags on all parents before executing child command. - TraverseChildren bool - - // FParseErrWhitelist flag parse errors to be ignored - FParseErrWhitelist FParseErrWhitelist - - ctx context.Context - - // commands is the list of commands supported by this program. - commands []*Command - // parent is a parent command for this command. - parent *Command - // Max lengths of commands' string lengths for use in padding. - commandsMaxUseLen int - commandsMaxCommandPathLen int - commandsMaxNameLen int - // commandsAreSorted defines, if command slice are sorted or not. - commandsAreSorted bool - // commandCalledAs is the name or alias value used to call this command. - commandCalledAs struct { - name string - called bool - } - // args is actual args parsed from flags. args []string // flagErrorBuf contains all error messages from pflag. 
@@ -206,14 +166,82 @@ type Command struct { outWriter io.Writer // errWriter is a writer defined by the user that replaces stderr errWriter io.Writer + + // FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + + // CompletionOptions is a set of options to control the handling of shell completion + CompletionOptions CompletionOptions + + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + ctx context.Context + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. + DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. 
+ DisableSuggestions bool + + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. + SuggestionsMinimumDistance int } -// Context returns underlying command context. If command wasn't -// executed with ExecuteContext Context returns Background context. +// Context returns underlying command context. If command was executed +// with ExecuteContext or the context was set with SetContext, the +// previously set context will be returned. Otherwise, nil is returned. +// +// Notice that a call to Execute and ExecuteC will replace a nil context of +// a command with a context.Background, so a background context will be +// returned by Context after one of these functions has been called. func (c *Command) Context() context.Context { return c.ctx } +// SetContext sets context for the command. It is set to context.Background by default and will be overwritten by +// Command.ExecuteContext or Command.ExecuteContextC +func (c *Command) SetContext(ctx context.Context) { + c.ctx = ctx +} + // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden // particularly useful when testing. 
func (c *Command) SetArgs(a []string) { @@ -309,7 +337,7 @@ func (c *Command) ErrOrStderr() io.Writer { return c.getErr(os.Stderr) } -// InOrStdin returns output to stderr +// InOrStdin returns input to stdin func (c *Command) InOrStdin() io.Reader { return c.getIn(os.Stdin) } @@ -357,7 +385,7 @@ func (c *Command) UsageFunc() (f func(*Command) error) { c.mergePersistentFlags() err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) if err != nil { - c.Println(err) + c.PrintErrln(err) } return err } @@ -385,7 +413,7 @@ func (c *Command) HelpFunc() func(*Command, []string) { // See https://github.com/spf13/cobra/issues/1002 err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) if err != nil { - c.Println(err) + c.PrintErrln(err) } } } @@ -408,7 +436,7 @@ func (c *Command) UsageString() string { c.outWriter = bb c.errWriter = bb - c.Usage() + CheckErr(c.Usage()) // Setting things back to normal c.outWriter = tmpOutput @@ -800,7 +828,7 @@ func (c *Command) execute(a []string) (err error) { } if !c.Runnable() { - return ErrSubCommandRequired + return flag.ErrHelp } c.preRun() @@ -836,6 +864,10 @@ func (c *Command) execute(a []string) (err error) { if err := c.validateRequiredFlags(); err != nil { return err } + if err := c.validateFlagGroups(); err != nil { + return err + } + if c.RunE != nil { if err := c.RunE(c, argWoFlags); err != nil { return err @@ -872,7 +904,8 @@ func (c *Command) preRun() { } // ExecuteContext is the same as Execute(), but sets the ctx on the command. -// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle functions. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. func (c *Command) ExecuteContext(ctx context.Context) error { c.ctx = ctx return c.Execute() @@ -886,6 +919,14 @@ func (c *Command) Execute() error { return err } +// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. 
+// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { + c.ctx = ctx + return c.ExecuteC() +} + // ExecuteC executes the command. func (c *Command) ExecuteC() (cmd *Command, err error) { if c.ctx == nil { @@ -902,9 +943,10 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { preExecHookFn(c) } - // initialize help as the last point possible to allow for user - // overriding + // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() + // initialize completion at the last point to allow for user overriding + c.initDefaultCompletionCmd() args := c.args @@ -913,6 +955,9 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } + // initialize the hidden command to be used for shell completion + c.initCompleteCmd(args) + var flags []string if c.TraverseChildren { cmd, flags, err = c.Traverse(args) @@ -925,8 +970,8 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { c = cmd } if !c.SilenceErrors { - c.Println("Error:", err.Error()) - c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + c.PrintErrln("Error:", err.Error()) + c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) } return c, err } @@ -946,26 +991,18 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { if err != nil { // Always show help if requested, even if SilenceErrors is in // effect - if err == flag.ErrHelp { + if errors.Is(err, flag.ErrHelp) { cmd.HelpFunc()(cmd, args) return cmd, nil } - // If command wasn't runnable, show full help, but do return the error. - // This will result in apps by default returning a non-success exit code, but also gives them the option to - // handle specially. 
- if err == ErrSubCommandRequired { - cmd.HelpFunc()(cmd, args) - return cmd, err - } - - // If root command has SilentErrors flagged, + // If root command has SilenceErrors flagged, // all subcommands should respect it if !cmd.SilenceErrors && !c.SilenceErrors { - c.Println("Error:", err.Error()) + c.PrintErrln("Error:", err.Error()) } - // If root command has SilentUsage flagged, + // If root command has SilenceUsage flagged, // all subcommands should respect it if !cmd.SilenceUsage && !c.SilenceUsage { c.Println(cmd.UsageString()) @@ -976,12 +1013,16 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { func (c *Command) ValidateArgs(args []string) error { if c.Args == nil { - return nil + return ArbitraryArgs(c, args) } return c.Args(c, args) } func (c *Command) validateRequiredFlags() error { + if c.DisableFlagParsing { + return nil + } + flags := c.Flags() missingFlagNames := []string{} flags.VisitAll(func(pflag *flag.Flag) { @@ -1033,7 +1074,11 @@ func (c *Command) InitDefaultVersionFlag() { } else { usage += c.Name() } - c.Flags().Bool("version", false, usage) + if c.Flags().ShorthandLookup("v") == nil { + c.Flags().BoolP("version", "v", false, usage) + } else { + c.Flags().Bool("version", false, usage) + } } } @@ -1051,15 +1096,33 @@ func (c *Command) InitDefaultHelpCmd() { Short: "Help about any command", Long: `Help provides help for any command in the application. Simply type ` + c.Name() + ` help [path to command] for full details.`, - + ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + var completions []string + cmd, _, e := c.Root().Find(args) + if e != nil { + return nil, ShellCompDirectiveNoFileComp + } + if cmd == nil { + // Root help command. 
+ cmd = c.Root() + } + for _, subCmd := range cmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + } + } + return completions, ShellCompDirectiveNoFileComp + }, Run: func(c *Command, args []string) { cmd, _, e := c.Root().Find(args) if cmd == nil || e != nil { c.Printf("Unknown help topic %#q\n", args) - c.Root().Usage() + CheckErr(c.Root().Usage()) } else { cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown - cmd.Help() + CheckErr(cmd.Help()) } }, } @@ -1178,12 +1241,12 @@ func (c *Command) PrintErr(i ...interface{}) { // PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. func (c *Command) PrintErrln(i ...interface{}) { - c.Print(fmt.Sprintln(i...)) + c.PrintErr(fmt.Sprintln(i...)) } // PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. func (c *Command) PrintErrf(format string, i ...interface{}) { - c.Print(fmt.Sprintf(format, i...)) + c.PrintErr(fmt.Sprintf(format, i...)) } // CommandPath returns the full path to this command. 
diff --git a/mantle/vendor/github.com/spf13/cobra/command_notwin.go b/mantle/vendor/github.com/spf13/cobra/command_notwin.go index 6159c1cc..bb5dad90 100644 --- a/mantle/vendor/github.com/spf13/cobra/command_notwin.go +++ b/mantle/vendor/github.com/spf13/cobra/command_notwin.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package cobra diff --git a/mantle/vendor/github.com/spf13/cobra/command_win.go b/mantle/vendor/github.com/spf13/cobra/command_win.go index 8768b173..a84f5a82 100644 --- a/mantle/vendor/github.com/spf13/cobra/command_win.go +++ b/mantle/vendor/github.com/spf13/cobra/command_win.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package cobra diff --git a/mantle/vendor/github.com/spf13/cobra/completions.go b/mantle/vendor/github.com/spf13/cobra/completions.go new file mode 100644 index 00000000..2c248399 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/completions.go @@ -0,0 +1,832 @@ +package cobra + +import ( + "fmt" + "os" + "strings" + "sync" + + "github.com/spf13/pflag" +) + +const ( + // ShellCompRequestCmd is the name of the hidden command that is used to request + // completion results from the program. It is used by the shell completion scripts. + ShellCompRequestCmd = "__complete" + // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request + // completion results without their description. It is used by the shell completion scripts. + ShellCompNoDescRequestCmd = "__completeNoDesc" +) + +// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. 
+var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} + +// lock for reading and writing from flagCompletionFunctions +var flagCompletionMutex = &sync.RWMutex{} + +// ShellCompDirective is a bit map representing the different behaviors the shell +// can be instructed to have once completions have been provided. +type ShellCompDirective int + +type flagCompError struct { + subCommand string + flagName string +} + +func (e *flagCompError) Error() string { + return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" +} + +const ( + // ShellCompDirectiveError indicates an error occurred and completions should be ignored. + ShellCompDirectiveError ShellCompDirective = 1 << iota + + // ShellCompDirectiveNoSpace indicates that the shell should not add a space + // after the completion even if there is a single completion provided. + ShellCompDirectiveNoSpace + + // ShellCompDirectiveNoFileComp indicates that the shell should not provide + // file completion even when no completion is provided. + ShellCompDirectiveNoFileComp + + // ShellCompDirectiveFilterFileExt indicates that the provided completions + // should be used as file extension filters. + // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() + // is a shortcut to using this directive explicitly. The BashCompFilenameExt + // annotation can also be used to obtain the same behavior for flags. + ShellCompDirectiveFilterFileExt + + // ShellCompDirectiveFilterDirs indicates that only directory names should + // be provided in file completion. To request directory names within another + // directory, the returned completions should specify the directory within + // which to search. The BashCompSubdirsInDir annotation can be used to + // obtain the same behavior but only for flags. 
+ ShellCompDirectiveFilterDirs + + // =========================================================================== + + // All directives using iota should be above this one. + // For internal use. + shellCompDirectiveMaxValue + + // ShellCompDirectiveDefault indicates to let the shell perform its default + // behavior after completions have been provided. + // This one must be last to avoid messing up the iota count. + ShellCompDirectiveDefault ShellCompDirective = 0 +) + +const ( + // Constants for the completion command + compCmdName = "completion" + compCmdNoDescFlagName = "no-descriptions" + compCmdNoDescFlagDesc = "disable completion descriptions" + compCmdNoDescFlagDefault = false +) + +// CompletionOptions are the options to control shell completion +type CompletionOptions struct { + // DisableDefaultCmd prevents Cobra from creating a default 'completion' command + DisableDefaultCmd bool + // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + // for shells that support completion descriptions + DisableNoDescFlag bool + // DisableDescriptions turns off all completion descriptions for shells + // that support them + DisableDescriptions bool + // HiddenDefaultCmd makes the default 'completion' command hidden + HiddenDefaultCmd bool +} + +// NoFileCompletions can be used to disable file completion for commands that should +// not trigger file completions. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return nil, ShellCompDirectiveNoFileComp +} + +// FixedCompletions can be used to create a completion function which always +// returns the same results. 
+func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return choices, directive + } +} + +// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. +func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { + flag := c.Flag(flagName) + if flag == nil { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) + } + flagCompletionMutex.Lock() + defer flagCompletionMutex.Unlock() + + if _, exists := flagCompletionFunctions[flag]; exists { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) + } + flagCompletionFunctions[flag] = f + return nil +} + +// Returns a string listing the different directive enabled in the specified parameter +func (d ShellCompDirective) string() string { + var directives []string + if d&ShellCompDirectiveError != 0 { + directives = append(directives, "ShellCompDirectiveError") + } + if d&ShellCompDirectiveNoSpace != 0 { + directives = append(directives, "ShellCompDirectiveNoSpace") + } + if d&ShellCompDirectiveNoFileComp != 0 { + directives = append(directives, "ShellCompDirectiveNoFileComp") + } + if d&ShellCompDirectiveFilterFileExt != 0 { + directives = append(directives, "ShellCompDirectiveFilterFileExt") + } + if d&ShellCompDirectiveFilterDirs != 0 { + directives = append(directives, "ShellCompDirectiveFilterDirs") + } + if len(directives) == 0 { + directives = append(directives, "ShellCompDirectiveDefault") + } + + if d >= shellCompDirectiveMaxValue { + return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) + } + return strings.Join(directives, ", ") +} + +// Adds a special hidden command that can be used to request 
custom completions. +func (c *Command) initCompleteCmd(args []string) { + completeCmd := &Command{ + Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), + Aliases: []string{ShellCompNoDescRequestCmd}, + DisableFlagsInUseLine: true, + Hidden: true, + DisableFlagParsing: true, + Args: MinimumNArgs(1), + Short: "Request shell completion choices for the specified command-line", + Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", + "to request completion choices for the specified command-line.", ShellCompRequestCmd), + Run: func(cmd *Command, args []string) { + finalCmd, completions, directive, err := cmd.getCompletions(args) + if err != nil { + CompErrorln(err.Error()) + // Keep going for multiple reasons: + // 1- There could be some valid completions even though there was an error + // 2- Even without completions, we need to print the directive + } + + noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + for _, comp := range completions { + if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { + // Remove all activeHelp entries in this case + if strings.HasPrefix(comp, activeHelpMarker) { + continue + } + } + if noDescriptions { + // Remove any description that may be included following a tab character. + comp = strings.Split(comp, "\t")[0] + } + + // Make sure we only write the first line to the output. + // This is needed if a description contains a linebreak. + // Otherwise the shell scripts will interpret the other lines as new flags + // and could therefore provide a wrong completion. + comp = strings.Split(comp, "\n")[0] + + // Finally trim the completion. This is especially important to get rid + // of a trailing tab when there are no description following it. + // For example, a sub-command without a description should not be completed + // with a tab at the end (or else zsh will show a -- following it + // although there is no description). 
+ comp = strings.TrimSpace(comp) + + // Print each possible completion to stdout for the completion script to consume. + fmt.Fprintln(finalCmd.OutOrStdout(), comp) + } + + // As the last printout, print the completion directive for the completion script to parse. + // The directive integer must be that last character following a single colon (:). + // The completion script expects : + fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + + // Print some helpful info to stderr for the user to understand. + // Output from stderr must be ignored by the completion script. + fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) + }, + } + c.AddCommand(completeCmd) + subCmd, _, err := c.Find(args) + if err != nil || subCmd.Name() != ShellCompRequestCmd { + // Only create this special command if it is actually being called. + // This reduces possible side-effects of creating such a command; + // for example, having this command would cause problems to a + // cobra program that only consists of the root command, since this + // command would cause the root command to suddenly have a subcommand. + c.RemoveCommand(completeCmd) + } +} + +func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { + // The last argument, which is not completely typed by the user, + // should not be part of the list of arguments + toComplete := args[len(args)-1] + trimmedArgs := args[:len(args)-1] + + var finalCmd *Command + var finalArgs []string + var err error + // Find the real command for which completion must be performed + // check if we need to traverse here to parse local flags on parent commands + if c.Root().TraverseChildren { + finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) + } else { + // For Root commands that don't specify any value for their Args fields, when we call + // Find(), if those Root commands don't have any sub-commands, they will accept arguments. 
+ // However, because we have added the __complete sub-command in the current code path, the + // call to Find() -> legacyArgs() will return an error if there are any arguments. + // To avoid this, we first remove the __complete command to get back to having no sub-commands. + rootCmd := c.Root() + if len(rootCmd.Commands()) == 1 { + rootCmd.RemoveCommand(c) + } + + finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) + } + if err != nil { + // Unable to find the real command. E.g., someInvalidCmd + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + } + finalCmd.ctx = c.ctx + + // Check if we are doing flag value completion before parsing the flags. + // This is important because if we are completing a flag value, we need to also + // remove the flag name argument from the list of finalArgs or else the parsing + // could fail due to an invalid value (incomplete) for the flag. + flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) + + // Check if interspersed is false or -- was set on a previous arg. + // This works by counting the arguments. Normally -- is not counted as arg but + // if -- was already set or interspersed is false and there is already one arg then + // the extra added -- is counted as arg. 
+ flagCompletion := true + _ = finalCmd.ParseFlags(append(finalArgs, "--")) + newArgCount := finalCmd.Flags().NArg() + + // Parse the flags early so we can check if required flags are set + if err = finalCmd.ParseFlags(finalArgs); err != nil { + return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + } + + realArgCount := finalCmd.Flags().NArg() + if newArgCount > realArgCount { + // don't do flag completion (see above) + flagCompletion = false + } + // Error while attempting to parse flags + if flagErr != nil { + // If error type is flagCompError and we don't want flagCompletion we should ignore the error + if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + } + } + + // We only remove the flags from the arguments if DisableFlagParsing is not set. + // This is important for commands which have requested to do their own flag completion. + if !finalCmd.DisableFlagParsing { + finalArgs = finalCmd.Flags().Args() + } + + if flag != nil && flagCompletion { + // Check if we are completing a flag value subject to annotations + if validExts, present := flag.Annotations[BashCompFilenameExt]; present { + if len(validExts) != 0 { + // File completion filtered by extensions + return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil + } + + // The annotation requests simple file completion. There is no reason to do + // that since it is the default behavior anyway. Let's ignore this annotation + // in case the program also registered a completion function for this flag. + // Even though it is a mistake on the program's side, let's be nice when we can. 
+ } + + if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { + if len(subDir) == 1 { + // Directory completion from within a directory + return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil + } + // Directory completion + return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + } + } + + var completions []string + var directive ShellCompDirective + + // Enforce flag groups before doing flag completions + finalCmd.enforceFlagGroupsForCompletion() + + // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; + // doing this allows for completion of persistent flag names even for commands that disable flag parsing. + // + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as it requires + // the flag name to be complete + if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { + // First check for required flags + completions = completeRequireFlags(finalCmd, toComplete) + + // If we have not found any required flags, only then can we show regular flags + if len(completions) == 0 { + doCompleteFlags := func(flag *pflag.Flag) { + if !flag.Changed || + strings.Contains(flag.Value.Type(), "Slice") || + strings.Contains(flag.Value.Type(), "Array") { + // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + // we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. 
+ finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + } + + directive = ShellCompDirectiveNoFileComp + if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { + // If there is a single completion, the shell usually adds a space + // after the completion. We don't want that if the flag ends with an = + directive = ShellCompDirectiveNoSpace + } + + if !finalCmd.DisableFlagParsing { + // If DisableFlagParsing==false, we have completed the flags as known by Cobra; + // we can return what we found. + // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we + // let the logic continue to see if ValidArgsFunction needs to be called. + return finalCmd, completions, directive, nil + } + } else { + directive = ShellCompDirectiveDefault + if flag == nil { + foundLocalNonPersistentFlag := false + // If TraverseChildren is true on the root command we don't check for + // local flags because we can use a local flag on a parent command + if !finalCmd.Root().TraverseChildren { + // Check if there are any local, non-persistent flags on the command-line + localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { + foundLocalNonPersistentFlag = true + } + }) + } + + // Complete subcommand names, including the help command + if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { + // We only complete sub-commands if: + // - there are no arguments on the command-line and + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true + for _, subCmd := range finalCmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, 
fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + directive = ShellCompDirectiveNoFileComp + } + } + } + + // Complete required flags even without the '-' prefix + completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) + + // Always complete ValidArgs, even if we are completing a subcommand name. + // This is for commands that have both subcommands and ValidArgs. + if len(finalCmd.ValidArgs) > 0 { + if len(finalArgs) == 0 { + // ValidArgs are only for the first argument + for _, validArg := range finalCmd.ValidArgs { + if strings.HasPrefix(validArg, toComplete) { + completions = append(completions, validArg) + } + } + directive = ShellCompDirectiveNoFileComp + + // If no completions were found within commands or ValidArgs, + // see if there are any ArgAliases that should be completed. + if len(completions) == 0 { + for _, argAlias := range finalCmd.ArgAliases { + if strings.HasPrefix(argAlias, toComplete) { + completions = append(completions, argAlias) + } + } + } + } + + // If there are ValidArgs specified (even if they don't match), we stop completion. + // Only one of ValidArgs or ValidArgsFunction can be used for a single command. + return finalCmd, completions, directive, nil + } + + // Let the logic continue so as to add any ValidArgsFunction completions, + // even if we already found sub-commands. + // This is for commands that have subcommands but also specify a ValidArgsFunction. + } + } + + // Find the completion function for the flag or command + var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + if flag != nil && flagCompletion { + flagCompletionMutex.RLock() + completionFn = flagCompletionFunctions[flag] + flagCompletionMutex.RUnlock() + } else { + completionFn = finalCmd.ValidArgsFunction + } + if completionFn != nil { + // Go custom completion defined for this flag or command. + // Call the registered completion function to get the completions. 
+ var comps []string + comps, directive = completionFn(finalCmd, finalArgs, toComplete) + completions = append(completions, comps...) + } + + return finalCmd, completions, directive, nil +} + +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { + if nonCompletableFlag(flag) { + return []string{} + } + + var completions []string + flagName := "--" + flag.Name + if strings.HasPrefix(flagName, toComplete) { + // Flag without the = + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + + // Why suggest both long forms: --flag and --flag= ? + // This forces the user to *always* have to type either an = or a space after the flag name. + // Let's be nice and avoid making users have to do that. + // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. + // The = form will still work, we just won't suggest it. + // This also makes the list of suggested flags shorter as we avoid all the = forms. + // + // if len(flag.NoOptDefVal) == 0 { + // // Flag requires a value, so it can be suffixed with = + // flagName += "=" + // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // } + } + + flagName = "-" + flag.Shorthand + if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + } + + return completions +} + +func completeRequireFlags(finalCmd *Command, toComplete string) []string { + var completions []string + + doCompleteRequiredFlags := func(flag *pflag.Flag) { + if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { + if !flag.Changed { + // If the flag is not already present, we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) 
+ } + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + + return completions +} + +func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { + if finalCmd.DisableFlagParsing { + // We only do flag completion if we are allowed to parse flags + // This is important for commands which have requested to do their own flag completion. + return nil, args, lastArg, nil + } + + var flagName string + trimmedArgs := args + flagWithEqual := false + orgLastArg := lastArg + + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as that function + // requires the flag name to be complete + if len(lastArg) > 0 && lastArg[0] == '-' { + if index := strings.Index(lastArg, "="); index >= 0 { + // Flag with an = + if strings.HasPrefix(lastArg[:index], "--") { + // Flag has full name + flagName = lastArg[2:index] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = lastArg[index-1 : index] + } + lastArg = lastArg[index+1:] + flagWithEqual = true + } else { + // Normal flag completion + return nil, args, lastArg, nil + } + } + + if len(flagName) == 0 { + if len(args) > 0 { + prevArg := args[len(args)-1] + if isFlagArg(prevArg) { + // Only consider the case where the flag does not contain an =. + // If the flag contains an = it means it has already been fully processed, + // so we don't need to deal with it here. 
+ if index := strings.Index(prevArg, "="); index < 0 { + if strings.HasPrefix(prevArg, "--") { + // Flag has full name + flagName = prevArg[2:] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = prevArg[len(prevArg)-1:] + } + // Remove the uncompleted flag or else there could be an error created + // for an invalid value for that flag + trimmedArgs = args[:len(args)-1] + } + } + } + } + + if len(flagName) == 0 { + // Not doing flag completion + return nil, trimmedArgs, lastArg, nil + } + + flag := findFlag(finalCmd, flagName) + if flag == nil { + // Flag not supported by this command, the interspersed option might be set so return the original args + return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} + } + + if !flagWithEqual { + if len(flag.NoOptDefVal) != 0 { + // We had assumed dealing with a two-word flag but the flag is a boolean flag. + // In that case, there is no value following it, so we are not really doing flag completion. + // Reset everything to do noun completion. + trimmedArgs = args + flag = nil + } + } + + return flag, trimmedArgs, lastArg, nil +} + +// initDefaultCompletionCmd adds a default 'completion' command to c. +// This function will do nothing if any of the following is true: +// 1- the feature has been explicitly disabled by the program, +// 2- c has no subcommands (to avoid creating one), +// 3- c already has a 'completion' command provided by the program. 
+func (c *Command) initDefaultCompletionCmd() { + if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { + return + } + + for _, cmd := range c.commands { + if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { + // A completion command is already available + return + } + } + + haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + + completionCmd := &Command{ + Use: compCmdName, + Short: "Generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. +See each sub-command's help for details on how to use the generated script. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + Hidden: c.CompletionOptions.HiddenDefaultCmd, + } + c.AddCommand(completionCmd) + + out := c.OutOrStdout() + noDesc := c.CompletionOptions.DisableDescriptions + shortDesc := "Generate the autocompletion script for %s" + bash := &Command{ + Use: "bash", + Short: fmt.Sprintf(shortDesc, "bash"), + Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. + +To load completions in your current shell session: + + source <(%[1]s completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion bash > /etc/bash_completion.d/%[1]s + +#### macOS: + + %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s + +You will need to start a new shell for this setup to take effect. 
+`, c.Root().Name()), + Args: NoArgs, + DisableFlagsInUseLine: true, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenBashCompletionV2(out, !noDesc) + }, + } + if haveNoDescFlag { + bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + zsh := &Command{ + Use: "zsh", + Short: fmt.Sprintf(shortDesc, "zsh"), + Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(%[1]s completion zsh); compdef _%[1]s %[1]s + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion zsh > "${fpath[1]}/_%[1]s" + +#### macOS: + + %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenZshCompletionNoDesc(out) + } + return cmd.Root().GenZshCompletion(out) + }, + } + if haveNoDescFlag { + zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + fish := &Command{ + Use: "fish", + Short: fmt.Sprintf(shortDesc, "fish"), + Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + %[1]s completion fish | source + +To load completions for every new session, execute once: + + %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +You will need to start a new shell for this setup to take effect. 
+`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenFishCompletion(out, !noDesc) + }, + } + if haveNoDescFlag { + fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + powershell := &Command{ + Use: "powershell", + Short: fmt.Sprintf(shortDesc, "powershell"), + Long: fmt.Sprintf(`Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + %[1]s completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenPowerShellCompletion(out) + } + return cmd.Root().GenPowerShellCompletionWithDesc(out) + + }, + } + if haveNoDescFlag { + powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + completionCmd.AddCommand(bash, zsh, fish, powershell) +} + +func findFlag(cmd *Command, name string) *pflag.Flag { + flagSet := cmd.Flags() + if len(name) == 1 { + // First convert the short flag into a long flag + // as the cmd.Flag() search only accepts long flags + if short := flagSet.ShorthandLookup(name); short != nil { + name = short.Name + } else { + set := cmd.InheritedFlags() + if short = set.ShorthandLookup(name); short != nil { + name = short.Name + } else { + return nil + } + } + } + return cmd.Flag(name) +} + +// CompDebug prints the specified string to the same file as where the +// completion script prints its logs. +// Note that completion printouts should never be on stdout as they would +// be wrongly interpreted as actual completion choices by the completion script. 
+func CompDebug(msg string, printToStdErr bool) { + msg = fmt.Sprintf("[Debug] %s", msg) + + // Such logs are only printed when the user has set the environment + // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. + if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { + f, err := os.OpenFile(path, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err == nil { + defer f.Close() + WriteStringAndCheck(f, msg) + } + } + + if printToStdErr { + // Must print to stderr for this not to be read by the completion script. + fmt.Fprint(os.Stderr, msg) + } +} + +// CompDebugln prints the specified string with a newline at the end +// to the same file as where the completion script prints its logs. +// Such logs are only printed when the user has set the environment +// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. +func CompDebugln(msg string, printToStdErr bool) { + CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) +} + +// CompError prints the specified completion message to stderr. +func CompError(msg string) { + msg = fmt.Sprintf("[Error] %s", msg) + CompDebug(msg, true) +} + +// CompErrorln prints the specified completion message to stderr with a newline at the end. 
+func CompErrorln(msg string) { + CompError(fmt.Sprintf("%s\n", msg)) +} diff --git a/mantle/vendor/github.com/spf13/cobra/fish_completions.go b/mantle/vendor/github.com/spf13/cobra/fish_completions.go new file mode 100644 index 00000000..005ee6be --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/fish_completions.go @@ -0,0 +1,220 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +func genFishComp(buf io.StringWriter, name string, includeDesc bool) { + // Variables should not contain a '-' or ':' character + nameForVar := name + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") + + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` +function __%[1]s_debug + set -l file "$BASH_COMP_DEBUG_FILE" + if test -n "$file" + echo "$argv" >> $file + end +end + +function __%[1]s_perform_completion + __%[1]s_debug "Starting __%[1]s_perform_completion" + + # Extract all args except the last one + set -l args (commandline -opc) + # Extract the last arg and escape it in case it is a space + set -l lastArg (string escape -- (commandline -ct)) + + __%[1]s_debug "args: $args" + __%[1]s_debug "last arg: $lastArg" + + # Disable ActiveHelp which is not supported for fish shell + set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg" + + __%[1]s_debug "Calling $requestComp" + set -l results (eval $requestComp 2> /dev/null) + + # Some programs may output extra empty lines after the directive. + # Let's ignore them or else it will break completion. 
+ # Ref: https://github.com/spf13/cobra/issues/1279 + for line in $results[-1..1] + if test (string trim -- $line) = "" + # Found an empty line, remove it + set results $results[1..-2] + else + # Found non-empty line, we have our proper output + break + end + end + + set -l comps $results[1..-2] + set -l directiveLine $results[-1] + + # For Fish, when completing a flag with an = (e.g., -n=) + # completions must be prefixed with the flag + set -l flagPrefix (string match -r -- '-.*=' "$lastArg") + + __%[1]s_debug "Comps: $comps" + __%[1]s_debug "DirectiveLine: $directiveLine" + __%[1]s_debug "flagPrefix: $flagPrefix" + + for comp in $comps + printf "%%s%%s\n" "$flagPrefix" "$comp" + end + + printf "%%s\n" "$directiveLine" +end + +# This function does two things: +# - Obtain the completions and store them in the global __%[1]s_comp_results +# - Return false if file completion should be performed +function __%[1]s_prepare_completions + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + + # Start fresh + set --erase __%[1]s_comp_results + + set -l results (__%[1]s_perform_completion) + __%[1]s_debug "Completion results: $results" + + if test -z "$results" + __%[1]s_debug "No completion, probably due to a failure" + # Might as well do file completion, in case it helps + return 1 + end + + set -l directive (string sub --start 2 $results[-1]) + set --global __%[1]s_comp_results $results[1..-2] + + __%[1]s_debug "Completions are: $__%[1]s_comp_results" + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveError %[4]d + set -l shellCompDirectiveNoSpace %[5]d + set -l shellCompDirectiveNoFileComp %[6]d + set -l shellCompDirectiveFilterFileExt %[7]d + set -l shellCompDirectiveFilterDirs %[8]d + + if test -z "$directive" + set directive 0 + end + + set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) + if test $compErr -eq 1 + __%[1]s_debug "Received error directive: aborting." 
+ # Might as well do file completion, in case it helps + return 1 + end + + set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) + set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) + if test $filefilter -eq 1; or test $dirfilter -eq 1 + __%[1]s_debug "File extension filtering or directory filtering not supported" + # Do full file completion instead + return 1 + end + + set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) + set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) + + __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" + + # If we want to prevent a space, or if file completion is NOT disabled, + # we need to count the number of valid completions. + # To do so, we will filter on prefix as the completions we have received + # may not already be filtered so as to allow fish to match on different + # criteria than the prefix. + if test $nospace -ne 0; or test $nofiles -eq 0 + set -l prefix (commandline -t | string escape --style=regex) + __%[1]s_debug "prefix: $prefix" + + set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) + set --global __%[1]s_comp_results $completions + __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" + + # Important not to quote the variable for count to work + set -l numComps (count $__%[1]s_comp_results) + __%[1]s_debug "numComps: $numComps" + + if test $numComps -eq 1; and test $nospace -ne 0 + # We must first split on \t to get rid of the descriptions to be + # able to check what the actual completion will be. + # We don't need descriptions anyway since there is only a single + # real completion which the shell will expand immediately. 
+ set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) + + # Fish won't add a space if the completion ends with any + # of the following characters: @=/:., + set -l lastChar (string sub -s -1 -- $split) + if not string match -r -q "[@=/:.,]" -- "$lastChar" + # In other cases, to support the "nospace" directive we trick the shell + # by outputting an extra, longer completion. + __%[1]s_debug "Adding second completion to perform nospace directive" + set --global __%[1]s_comp_results $split[1] $split[1]. + __%[1]s_debug "Completions are now: $__%[1]s_comp_results" + end + end + + if test $numComps -eq 0; and test $nofiles -eq 0 + # To be consistent with bash and zsh, we only trigger file + # completion when there are no other completions + __%[1]s_debug "Requesting file completion" + return 1 + end + end + + return 0 +end + +# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves +# so we can properly delete any completions provided by another script. +# Only do this if the program can be found, or else fish may print some errors; besides, +# the existing completions will only be loaded if the program can be found. +if type -q "%[2]s" + # The space after the program name is essential to trigger completion for the program + # and not completion of the program name itself. + # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. + complete --do-complete "%[2]s " > /dev/null 2>&1 +end + +# Remove any pre-existing completions for the program since we will be handling all of them. +complete -c %[2]s -e + +# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results +# which provides the program's completion choices. 
+complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' + +`, nameForVar, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) +} + +// GenFishCompletion generates fish completion file and writes to the passed writer. +func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genFishComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +// GenFishCompletionFile generates fish completion file. +func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenFishCompletion(outFile, includeDesc) +} diff --git a/mantle/vendor/github.com/spf13/cobra/fish_completions.md b/mantle/vendor/github.com/spf13/cobra/fish_completions.md new file mode 100644 index 00000000..19b2ed12 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/fish_completions.md @@ -0,0 +1,4 @@ +## Generating Fish Completions For Your cobra.Command + +Please refer to [Shell Completions](shell_completions.md) for details. + diff --git a/mantle/vendor/github.com/spf13/cobra/flag_groups.go b/mantle/vendor/github.com/spf13/cobra/flag_groups.go new file mode 100644 index 00000000..dc784311 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/flag_groups.go @@ -0,0 +1,223 @@ +// Copyright © 2022 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +const ( + requiredAsGroup = "cobra_annotation_required_if_others_set" + mutuallyExclusive = "cobra_annotation_mutually_exclusive" +) + +// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors +// if the command is invoked with a subset (but not all) of the given flags. +func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) + } + if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + +// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors +// if the command is invoked with more than one flag from the given set of flags. +func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) + } + // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. 
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + panic(err) + } + } +} + +// validateFlagGroups validates the mutuallyExclusive/requiredAsGroup logic and returns the +// first error encountered. +func (c *Command) validateFlagGroups() error { + if c.DisableFlagParsing { + return nil + } + + flags := c.Flags() + + // groupStatus format is the list of flags as a unique ID, + // then a map of each flag name and whether it is set or not. + groupStatus := map[string]map[string]bool{} + mutuallyExclusiveGroupStatus := map[string]map[string]bool{} + flags.VisitAll(func(pflag *flag.Flag) { + processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + }) + + if err := validateRequiredFlagGroups(groupStatus); err != nil { + return err + } + if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { + return err + } + return nil +} + +func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool { + for _, fname := range flagnames { + f := fs.Lookup(fname) + if f == nil { + return false + } + } + return true +} + +func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) { + groupInfo, found := pflag.Annotations[annotation] + if found { + for _, group := range groupInfo { + if groupStatus[group] == nil { + flagnames := strings.Split(group, " ") + + // Only consider this flag group at all if all the flags are defined. + if !hasAllFlags(flags, flagnames...) 
{ + continue + } + + groupStatus[group] = map[string]bool{} + for _, name := range flagnames { + groupStatus[group][name] = false + } + } + + groupStatus[group][pflag.Name] = pflag.Changed + } + } +} + +func validateRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + + unset := []string{} + for flagname, isSet := range flagnameAndStatus { + if !isSet { + unset = append(unset, flagname) + } + } + if len(unset) == len(flagnameAndStatus) || len(unset) == 0 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. + sort.Strings(unset) + return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset) + } + + return nil +} + +func validateExclusiveFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) == 0 || len(set) == 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. 
+ sort.Strings(set) + return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set) + } + return nil +} + +func sortedKeys(m map[string]map[string]bool) []string { + keys := make([]string, len(m)) + i := 0 + for k := range m { + keys[i] = k + i++ + } + sort.Strings(keys) + return keys +} + +// enforceFlagGroupsForCompletion will do the following: +// - when a flag in a group is present, other flags in the group will be marked required +// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden +// This allows the standard completion logic to behave appropriately for flag groups +func (c *Command) enforceFlagGroupsForCompletion() { + if c.DisableFlagParsing { + return + } + + flags := c.Flags() + groupStatus := map[string]map[string]bool{} + mutuallyExclusiveGroupStatus := map[string]map[string]bool{} + c.Flags().VisitAll(func(pflag *flag.Flag) { + processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + }) + + // If a flag that is part of a group is present, we make all the other flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range groupStatus { + for _, isSet := range flagnameAndStatus { + if isSet { + // One of the flags of the group is set, mark the other ones as required + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + } + + // If a flag that is mutually exclusive to others is present, we hide the other + // flags of that group so the shell completion does not suggest them + for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { + for flagName, isSet := range flagnameAndStatus { + if isSet { + // One of the flags of the mutually exclusive group is set, mark the other ones as hidden + // Don't mark 
the flag that is already set as hidden because it may be an + // array or slice flag and therefore must continue being suggested + for _, fName := range strings.Split(flagList, " ") { + if fName != flagName { + flag := c.Flags().Lookup(fName) + flag.Hidden = true + } + } + } + } + } +} diff --git a/mantle/vendor/github.com/spf13/cobra/go.mod b/mantle/vendor/github.com/spf13/cobra/go.mod deleted file mode 100644 index dea1030b..00000000 --- a/mantle/vendor/github.com/spf13/cobra/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/spf13/cobra - -go 1.12 - -require ( - github.com/cpuguy83/go-md2man/v2 v2.0.0 - github.com/inconshreveable/mousetrap v1.0.0 - github.com/mitchellh/go-homedir v1.1.0 - github.com/spf13/pflag v1.0.3 - github.com/spf13/viper v1.4.0 - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/mantle/vendor/github.com/spf13/cobra/go.sum b/mantle/vendor/github.com/spf13/cobra/go.sum deleted file mode 100644 index 3aaa2ac0..00000000 --- a/mantle/vendor/github.com/spf13/cobra/go.sum +++ /dev/null @@ -1,149 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod 
h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 
-github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/mantle/vendor/github.com/spf13/cobra/powershell_completions.go b/mantle/vendor/github.com/spf13/cobra/powershell_completions.go index 756c61b9..379e7c08 100644 --- a/mantle/vendor/github.com/spf13/cobra/powershell_completions.go +++ 
b/mantle/vendor/github.com/spf13/cobra/powershell_completions.go @@ -1,6 +1,3 @@ -// PowerShell completions are based on the amazing work from clap: -// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs -// // The generated scripts require PowerShell v5.0+ (which comes Windows 10, but // can be downloaded separately for windows 7 or 8.1). @@ -11,90 +8,281 @@ import ( "fmt" "io" "os" - "strings" - - "github.com/spf13/pflag" ) -var powerShellCompletionTemplate = `using namespace System.Management.Automation -using namespace System.Management.Automation.Language -Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock { - param($wordToComplete, $commandAst, $cursorPosition) - $commandElements = $commandAst.CommandElements - $command = @( - '%s' - for ($i = 1; $i -lt $commandElements.Count; $i++) { - $element = $commandElements[$i] - if ($element -isnot [StringConstantExpressionAst] -or - $element.StringConstantType -ne [StringConstantType]::BareWord -or - $element.Value.StartsWith('-')) { - break - } - $element.Value - } - ) -join ';' - $completions = @(switch ($command) {%s - }) - $completions.Where{ $_.CompletionText -like "$wordToComplete*" } | - Sort-Object -Property ListItemText -}` - -func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) { - var cmdName string - if previousCommandName == "" { - cmdName = cmd.Name() - } else { - cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name()) - } - - fmt.Fprintf(out, "\n '%s' {", cmdName) - - cmd.Flags().VisitAll(func(flag *pflag.Flag) { - if nonCompletableFlag(flag) { - return - } - usage := escapeStringForPowerShell(flag.Usage) - if len(flag.Shorthand) > 0 { - fmt.Fprintf(out, "\n [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage) - } - fmt.Fprintf(out, "\n [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, 
'%s')", flag.Name, flag.Name, usage) - }) - - for _, subCmd := range cmd.Commands() { - usage := escapeStringForPowerShell(subCmd.Short) - fmt.Fprintf(out, "\n [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage) +func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd } + WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- - fmt.Fprint(out, "\n break\n }") - - for _, subCmd := range cmd.Commands() { - generatePowerShellSubcommandCases(out, subCmd, cmdName) - } +function __%[1]s_debug { + if ($env:BASH_COMP_DEBUG_FILE) { + "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" + } } -func escapeStringForPowerShell(s string) string { - return strings.Replace(s, "'", "''", -1) +filter __%[1]s_escapeStringWithSpecialChars { +`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` } -// GenPowerShellCompletion generates PowerShell completion file and writes to the passed writer. -func (c *Command) GenPowerShellCompletion(w io.Writer) error { - buf := new(bytes.Buffer) +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock { + param( + $WordToComplete, + $CommandAst, + $CursorPosition + ) + + # Get the current command line and convert into a string + $Command = $CommandAst.CommandElements + $Command = "$Command" + + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CursorPosition location, so we need + # to truncate the command-line ($Command) up to the $CursorPosition location. + # Make sure the $Command is longer then the $CursorPosition before we truncate. 
+ # This happens because the $Command does not include the last space. + if ($Command.Length -gt $CursorPosition) { + $Command=$Command.Substring(0,$CursorPosition) + } + __%[1]s_debug "Truncated command: $Command" + + $ShellCompDirectiveError=%[3]d + $ShellCompDirectiveNoSpace=%[4]d + $ShellCompDirectiveNoFileComp=%[5]d + $ShellCompDirectiveFilterFileExt=%[6]d + $ShellCompDirectiveFilterDirs=%[7]d + + # Prepare the command to request completions for the program. + # Split the command at the first space to separate the program and arguments. + $Program,$Arguments = $Command.Split(" ",2) + + $RequestComp="$Program %[2]s $Arguments" + __%[1]s_debug "RequestComp: $RequestComp" + + # we cannot use $WordToComplete because it + # has the wrong values if the cursor was moved + # so use the last argument + if ($WordToComplete -ne "" ) { + $WordToComplete = $Arguments.Split(" ")[-1] + } + __%[1]s_debug "New WordToComplete: $WordToComplete" + + + # Check for flag with equal sign + $IsEqualFlag = ($WordToComplete -Like "--*=*" ) + if ( $IsEqualFlag ) { + __%[1]s_debug "Completing equal sign flag" + # Remove the flag part + $Flag,$WordToComplete = $WordToComplete.Split("=",2) + } + + if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. 
+ __%[1]s_debug "Adding extra empty parameter" +`+" # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+` +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } + + __%[1]s_debug "Calling $RequestComp" + # First disable ActiveHelp which is not supported for Powershell + $env:%[8]s=0 + + #call the command store the output in $out and redirect stderr and stdout to null + # $Out is an array contains each line per element + Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null + + # get directive from last line + [int]$Directive = $Out[-1].TrimStart(':') + if ($Directive -eq "") { + # There is no directive specified + $Directive = 0 + } + __%[1]s_debug "The completion directive is: $Directive" + + # remove directive (last element) from out + $Out = $Out | Where-Object { $_ -ne $Out[-1] } + __%[1]s_debug "The completions are: $Out" + + if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + } + + $Longest = 0 + $Values = $Out | ForEach-Object { + #Split the output in name and description +`+" $Name, $Description = $_.Split(\"`t\",2)"+` + __%[1]s_debug "Name: $Name Description: $Description" + + # Look for the longest completion so that we can format things nicely + if ($Longest -lt $Name.Length) { + $Longest = $Name.Length + } + + # Set the description to a one space string if there is none set. 
+ # This is needed because the CompletionResult does not accept an empty string as argument + if (-Not $Description) { + $Description = " " + } + @{Name="$Name";Description="$Description"} + } + + + $Space = " " + if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { + # remove the space here + __%[1]s_debug "ShellCompDirectiveNoSpace is called" + $Space = "" + } + + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } + + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + + # Join the flag back if we have an equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { + __%[1]s_debug "ShellCompDirectiveNoFileComp is called" + + if ($Values.Length -eq 0) { + # Just print an empty string here so the + # shell does not start to complete paths. + # We cannot use CompletionResult here because + # it does not accept an empty string as argument. 
+ "" + return + } + } + + # Get the current mode + $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function + __%[1]s_debug "Mode: $Mode" + + $Values | ForEach-Object { + + # store temporary because switch will overwrite $_ + $comp = $_ + + # PowerShell supports three different completion modes + # - TabCompleteNext (default windows style - on each key press the next option is displayed) + # - Complete (works like bash) + # - MenuComplete (works like zsh) + # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function + + # CompletionResult Arguments: + # 1) CompletionText text to be used as the auto completion result + # 2) ListItemText text to be displayed in the suggestion list + # 3) ResultType type of completion result + # 4) ToolTip text for the tooltip with details about the object - var subCommandCases bytes.Buffer - generatePowerShellSubcommandCases(&subCommandCases, c, "") - fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String()) + switch ($Mode) { + # bash like + "Complete" { + + if ($Values.Length -eq 1) { + __%[1]s_debug "Only one completion left" + + # insert space after value + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + } else { + # Add the proper number of spaces to align the descriptions + while($comp.Name.Length -lt $Longest) { + $comp.Name = $comp.Name + " " + } + + # Check for empty description and only add parentheses if needed + if ($($comp.Description) -eq " " ) { + $Description = "" + } else { + $Description = " ($($comp.Description))" + } + + [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } + } + + # zsh like + "MenuComplete" { + # insert space after value + # MenuComplete will automatically show the ToolTip of + # the highlighted value 
at the bottom of the suggestions. + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + + # TabCompleteNext and in case we get something unknown + Default { + # Like MenuComplete but we don't want to add a space here because + # the user need to press space anyway to get the completion. + # Description will not be shown because that's not possible with TabCompleteNext + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + } + + } +} +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) +} + +func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genPowerShellComp(buf, c.Name(), includeDesc) _, err := buf.WriteTo(w) return err } -// GenPowerShellCompletionFile generates PowerShell completion file. -func (c *Command) GenPowerShellCompletionFile(filename string) error { +func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { outFile, err := os.Create(filename) if err != nil { return err } defer outFile.Close() - return c.GenPowerShellCompletion(outFile) + return c.genPowerShellCompletion(outFile, includeDesc) +} + +// GenPowerShellCompletionFile generates powershell completion file without descriptions. +func (c *Command) GenPowerShellCompletionFile(filename string) error { + return c.genPowerShellCompletionFile(filename, false) +} + +// GenPowerShellCompletion generates powershell completion file without descriptions +// and writes it to the passed writer. 
+func (c *Command) GenPowerShellCompletion(w io.Writer) error { + return c.genPowerShellCompletion(w, false) +} + +// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. +func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { + return c.genPowerShellCompletionFile(filename, true) +} + +// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { + return c.genPowerShellCompletion(w, true) } diff --git a/mantle/vendor/github.com/spf13/cobra/powershell_completions.md b/mantle/vendor/github.com/spf13/cobra/powershell_completions.md index afed8024..c449f1e5 100644 --- a/mantle/vendor/github.com/spf13/cobra/powershell_completions.md +++ b/mantle/vendor/github.com/spf13/cobra/powershell_completions.md @@ -1,14 +1,3 @@ # Generating PowerShell Completions For Your Own cobra.Command -Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles. - -# What's supported - -- Completion for subcommands using their `.Short` description -- Completion for non-hidden flags using their `.Name` and `.Shorthand` - -# What's not yet supported - -- Command aliases -- Required, filename or custom flags (they will work like normal flags) -- Custom completion scripts +Please refer to [Shell Completions](shell_completions.md#powershell-completions) for details. 
diff --git a/mantle/vendor/github.com/spf13/cobra/projects_using_cobra.md b/mantle/vendor/github.com/spf13/cobra/projects_using_cobra.md new file mode 100644 index 00000000..ac680118 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/projects_using_cobra.md @@ -0,0 +1,54 @@ +## Projects using Cobra + +- [Arduino CLI](https://github.com/arduino/arduino-cli) +- [Bleve](https://blevesearch.com/) +- [CockroachDB](https://www.cockroachlabs.com/) +- [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) +- [Datree](https://github.com/datreeio/datree) +- [Delve](https://github.com/derekparker/delve) +- [Docker (distribution)](https://github.com/docker/distribution) +- [Etcd](https://etcd.io/) +- [Gardener](https://github.com/gardener/gardenctl) +- [Giant Swarm's gsctl](https://github.com/giantswarm/gsctl) +- [Git Bump](https://github.com/erdaltsksn/git-bump) +- [Github CLI](https://github.com/cli/cli) +- [GitHub Labeler](https://github.com/erdaltsksn/gh-label) +- [Golangci-lint](https://golangci-lint.run) +- [GopherJS](https://github.com/gopherjs/gopherjs) +- [GoReleaser](https://goreleaser.com) +- [Helm](https://helm.sh) +- [Hugo](https://gohugo.io) +- [Infracost](https://github.com/infracost/infracost) +- [Istio](https://istio.io) +- [Kool](https://github.com/kool-dev/kool) +- [Kubernetes](https://kubernetes.io/) +- [Kubescape](https://github.com/armosec/kubescape) +- [Linkerd](https://linkerd.io/) +- [Mattermost-server](https://github.com/mattermost/mattermost-server) +- [Mercure](https://mercure.rocks/) +- [Meroxa CLI](https://github.com/meroxa/cli) +- [Metal Stack CLI](https://github.com/metal-stack/metalctl) +- [Moby (former Docker)](https://github.com/moby/moby) +- [Moldy](https://github.com/Moldy-Community/moldy) +- [Multi-gitter](https://github.com/lindell/multi-gitter) +- [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +- [nFPM](https://nfpm.goreleaser.com) +- [OpenShift](https://www.openshift.com/) +- [Ory 
Hydra](https://github.com/ory/hydra) +- [Ory Kratos](https://github.com/ory/kratos) +- [Pixie](https://github.com/pixie-io/pixie) +- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) +- [Pouch](https://github.com/alibaba/pouch) +- [ProjectAtomic (enterprise)](https://www.projectatomic.io/) +- [Prototool](https://github.com/uber/prototool) +- [Pulumi](https://www.pulumi.com) +- [QRcp](https://github.com/claudiodangelis/qrcp) +- [Random](https://github.com/erdaltsksn/random) +- [Rclone](https://rclone.org/) +- [Scaleway CLI](https://github.com/scaleway/scaleway-cli) +- [Skaffold](https://skaffold.dev/) +- [Tendermint](https://github.com/tendermint/tendermint) +- [Twitch CLI](https://github.com/twitchdev/twitch-cli) +- [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli) +- VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework) +- [Werf](https://werf.io/) diff --git a/mantle/vendor/github.com/spf13/cobra/shell_completions.go b/mantle/vendor/github.com/spf13/cobra/shell_completions.go index ba0af9cb..d99bf91e 100644 --- a/mantle/vendor/github.com/spf13/cobra/shell_completions.go +++ b/mantle/vendor/github.com/spf13/cobra/shell_completions.go @@ -4,82 +4,81 @@ import ( "github.com/spf13/pflag" ) -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// MarkFlagRequired instructs the various shell completion implementations to +// prioritize the named flag when performing completion, // and causes your command to report an error if invoked without the flag. 
func (c *Command) MarkFlagRequired(name string) error { return MarkFlagRequired(c.Flags(), name) } -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, +// MarkPersistentFlagRequired instructs the various shell completion implementations to +// prioritize the named persistent flag when performing completion, // and causes your command to report an error if invoked without the flag. func (c *Command) MarkPersistentFlagRequired(name string) error { return MarkFlagRequired(c.PersistentFlags(), name) } -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// MarkFlagRequired instructs the various shell completion implementations to +// prioritize the named flag when performing completion, // and causes your command to report an error if invoked without the flag. func MarkFlagRequired(flags *pflag.FlagSet, name string) error { return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) } -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +// MarkFlagFilename instructs the various shell completion implementations to +// limit completions for the named flag to the specified file extensions. func (c *Command) MarkFlagFilename(name string, extensions ...string) error { return MarkFlagFilename(c.Flags(), name, extensions...) } // MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. -// Generated bash autocompletion will call the bash function f for the flag. +// The bash completion script will call the bash function f for the flag. +// +// This will only work for bash completion. +// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows +// to register a Go function which will work across all shells. 
func (c *Command) MarkFlagCustom(name string, f string) error { return MarkFlagCustom(c.Flags(), name, f) } // MarkPersistentFlagFilename instructs the various shell completion -// implementations to limit completions for this persistent flag to the -// specified extensions (patterns). -// -// Shell Completion compatibility matrix: bash, zsh +// implementations to limit completions for the named persistent flag to the +// specified file extensions. func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { return MarkFlagFilename(c.PersistentFlags(), name, extensions...) } // MarkFlagFilename instructs the various shell completion implementations to -// limit completions for this flag to the specified extensions (patterns). -// -// Shell Completion compatibility matrix: bash, zsh +// limit completions for the named flag to the specified file extensions. func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { return flags.SetAnnotation(name, BashCompFilenameExt, extensions) } -// MarkFlagCustom instructs the various shell completion implementations to -// limit completions for this flag to the specified extensions (patterns). +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// The bash completion script will call the bash function f for the flag. // -// Shell Completion compatibility matrix: bash, zsh +// This will only work for bash completion. +// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows +// to register a Go function which will work across all shells. func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { return flags.SetAnnotation(name, BashCompCustom, []string{f}) } // MarkFlagDirname instructs the various shell completion implementations to -// complete only directories with this named flag. -// -// Shell Completion compatibility matrix: zsh +// limit completions for the named flag to directory names. 
func (c *Command) MarkFlagDirname(name string) error { return MarkFlagDirname(c.Flags(), name) } // MarkPersistentFlagDirname instructs the various shell completion -// implementations to complete only directories with this persistent named flag. -// -// Shell Completion compatibility matrix: zsh +// implementations to limit completions for the named persistent flag to +// directory names. func (c *Command) MarkPersistentFlagDirname(name string) error { return MarkFlagDirname(c.PersistentFlags(), name) } // MarkFlagDirname instructs the various shell completion implementations to -// complete only directories with this specified flag. -// -// Shell Completion compatibility matrix: zsh +// limit completions for the named flag to directory names. func MarkFlagDirname(flags *pflag.FlagSet, name string) error { - zshPattern := "-(/)" - return flags.SetAnnotation(name, zshCompDirname, []string{zshPattern}) + return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{}) } diff --git a/mantle/vendor/github.com/spf13/cobra/shell_completions.md b/mantle/vendor/github.com/spf13/cobra/shell_completions.md new file mode 100644 index 00000000..1e2058ed --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/shell_completions.md @@ -0,0 +1,548 @@ +# Generating shell completions + +Cobra can generate shell completions for multiple shells. +The currently supported shells are: +- Bash +- Zsh +- fish +- PowerShell + +Cobra will automatically provide your program with a fully functional `completion` command, +similarly to how it provides the `help` command. + +## Creating your own completion command + +If you do not wish to use the default `completion` command, you can choose to +provide your own, which will take precedence over the default one. (This also provides +backwards-compatibility with programs that already have their own `completion` command.) 
+ +If you are using the `cobra-cli` generator, +which can be found at [spf13/cobra-cli](https://github.com/spf13/cobra-cli), +you can create a completion command by running + +```bash +cobra-cli add completion +``` +and then modifying the generated `cmd/completion.go` file to look something like this +(writing the shell script to stdout allows the most flexible use): + +```go +var completionCmd = &cobra.Command{ + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate completion script", + Long: fmt.Sprintf(`To load completions: + +Bash: + + $ source <(%[1]s completion bash) + + # To load completions for each session, execute once: + # Linux: + $ %[1]s completion bash > /etc/bash_completion.d/%[1]s + # macOS: + $ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s + +Zsh: + + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ %[1]s completion zsh > "${fpath[1]}/_%[1]s" + + # You will need to start a new shell for this setup to take effect. + +fish: + + $ %[1]s completion fish | source + + # To load completions for each session, execute once: + $ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +PowerShell: + + PS> %[1]s completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> %[1]s completion powershell > %[1]s.ps1 + # and source this file from your PowerShell profile. 
+`,cmd.Root().Name()), + DisableFlagsInUseLine: true, + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + Args: cobra.ExactValidArgs(1), + Run: func(cmd *cobra.Command, args []string) { + switch args[0] { + case "bash": + cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + cmd.Root().GenZshCompletion(os.Stdout) + case "fish": + cmd.Root().GenFishCompletion(os.Stdout, true) + case "powershell": + cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + } + }, +} +``` + +**Note:** The cobra generator may include messages printed to stdout, for example, if the config file is loaded; this will break the auto-completion script so must be removed. + +## Adapting the default completion command + +Cobra provides a few options for the default `completion` command. To configure such options you must set +the `CompletionOptions` field on the *root* command. + +To tell Cobra *not* to provide the default `completion` command: +``` +rootCmd.CompletionOptions.DisableDefaultCmd = true +``` + +To tell Cobra *not* to provide the user with the `--no-descriptions` flag to the completion sub-commands: +``` +rootCmd.CompletionOptions.DisableNoDescFlag = true +``` + +To tell Cobra to completely disable descriptions for completions: +``` +rootCmd.CompletionOptions.DisableDescriptions = true +``` + +# Customizing completions + +The generated completion scripts will automatically handle completing commands and flags. However, you can make your completions much more powerful by providing information to complete your program's nouns and flag values. + +## Completion of nouns + +### Static completion of nouns + +Cobra allows you to provide a pre-defined list of completion choices for your nouns using the `ValidArgs` field. +For example, if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. 
+Some simplified code from `kubectl get` looks like: + +```go +validArgs = []string{ "pod", "node", "service", "replicationcontroller" } + +cmd := &cobra.Command{ + Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", + Short: "Display one or many resources", + Long: get_long, + Example: get_example, + Run: func(cmd *cobra.Command, args []string) { + cobra.CheckErr(RunGet(f, out, cmd, args)) + }, + ValidArgs: validArgs, +} +``` + +Notice we put the `ValidArgs` field on the `get` sub-command. Doing so will give results like: + +```bash +$ kubectl get [tab][tab] +node pod replicationcontroller service +``` + +#### Aliases for nouns + +If your nouns have aliases, you can define them alongside `ValidArgs` using `ArgAliases`: + +```go +argAliases = []string { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } + +cmd := &cobra.Command{ + ... + ValidArgs: validArgs, + ArgAliases: argAliases +} +``` + +The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by +the completion algorithm if entered manually, e.g. in: + +```bash +$ kubectl get rc [tab][tab] +backend frontend database +``` + +Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of +replication controllers following `rc`. + +### Dynamic completion of nouns + +In some cases it is not possible to provide a list of completions in advance. Instead, the list of completions must be determined at execution-time. In a similar fashion as for static completions, you can use the `ValidArgsFunction` field to provide a Go function that Cobra will execute when it needs the list of completion choices for the nouns of a command. Note that either `ValidArgs` or `ValidArgsFunction` can be used for a single cobra command, but not both. 
+Simplified code from `helm status` looks like: + +```go +cmd := &cobra.Command{ + Use: "status RELEASE_NAME", + Short: "Display the status of the named release", + Long: status_long, + RunE: func(cmd *cobra.Command, args []string) { + RunGet(args[0]) + }, + ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + if len(args) != 0 { + return nil, cobra.ShellCompDirectiveNoFileComp + } + return getReleasesFromCluster(toComplete), cobra.ShellCompDirectiveNoFileComp + }, +} +``` +Where `getReleasesFromCluster()` is a Go function that obtains the list of current Helm releases running on the Kubernetes cluster. +Notice we put the `ValidArgsFunction` on the `status` sub-command. Let's assume the Helm releases on the cluster are: `harbor`, `notary`, `rook` and `thanos` then this dynamic completion will give results like: + +```bash +$ helm status [tab][tab] +harbor notary rook thanos +``` +You may have noticed the use of `cobra.ShellCompDirective`. These directives are bit fields allowing to control some shell completion behaviors for your particular completion. You can combine them with the bit-or operator such as `cobra.ShellCompDirectiveNoSpace | cobra.ShellCompDirectiveNoFileComp` +```go +// Indicates that the shell will perform its default behavior after completions +// have been provided (this implies none of the other directives). +ShellCompDirectiveDefault + +// Indicates an error occurred and completions should be ignored. +ShellCompDirectiveError + +// Indicates that the shell should not add a space after the completion, +// even if there is a single completion provided. +ShellCompDirectiveNoSpace + +// Indicates that the shell should not provide file completion even when +// no completion is provided. +ShellCompDirectiveNoFileComp + +// Indicates that the returned completions should be used as file extension filters. 
+// For example, to complete only files of the form *.json or *.yaml: +// return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt +// For flags, using MarkFlagFilename() and MarkPersistentFlagFilename() +// is a shortcut to using this directive explicitly. +// +ShellCompDirectiveFilterFileExt + +// Indicates that only directory names should be provided in file completion. +// For example: +// return nil, ShellCompDirectiveFilterDirs +// For flags, using MarkFlagDirname() is a shortcut to using this directive explicitly. +// +// To request directory names within another directory, the returned completions +// should specify a single directory name within which to search. For example, +// to complete directories within "themes/": +// return []string{"themes"}, ShellCompDirectiveFilterDirs +// +ShellCompDirectiveFilterDirs +``` + +***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function. + +#### Debugging + +Cobra achieves dynamic completion through the use of a hidden command called by the completion script. 
To debug your Go completion code, you can call this hidden command directly:
+```bash
+$ helm __complete status har
+harbor
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+***Important:*** If the noun to complete is empty (when the user has not yet typed any letters of that noun), you must pass an empty parameter to the `__complete` command:
+```bash
+$ helm __complete status ""
+harbor
+notary
+rook
+thanos
+:4
+Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr
+```
+Calling the `__complete` command directly allows you to run the Go debugger to troubleshoot your code. You can also add printouts to your code; Cobra provides the following functions to use for printouts in Go completion code:
+```go
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and optionally prints to stderr.
+cobra.CompDebug(msg string, printToStdErr bool)
+cobra.CompDebugln(msg string, printToStdErr bool)
+
+// Prints to the completion script debug file (if BASH_COMP_DEBUG_FILE
+// is set to a file path) and to stderr.
+cobra.CompError(msg string)
+cobra.CompErrorln(msg string)
+```
+***Important:*** You should **not** leave traces that print directly to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned above.
+
+## Completions for flags
+
+### Mark flags as required
+
+Most of the time completions will only show sub-commands. But if a flag is required to make a sub-command work, you probably want it to show up when the user types [tab][tab].
You can mark a flag as 'Required' like so: + +```go +cmd.MarkFlagRequired("pod") +cmd.MarkFlagRequired("container") +``` + +and you'll get something like + +```bash +$ kubectl exec [tab][tab] +-c --container= -p --pod= +``` + +### Specify dynamic flag completion + +As for nouns, Cobra provides a way of defining dynamic completion of flags. To provide a Go function that Cobra will execute when it needs the list of completion choices for a flag, you must register the function using the `command.RegisterFlagCompletionFunc()` function. + +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"json", "table", "yaml"}, cobra.ShellCompDirectiveDefault +}) +``` +Notice that calling `RegisterFlagCompletionFunc()` is done through the `command` with which the flag is associated. In our example this dynamic completion will give results like so: + +```bash +$ helm status --output [tab][tab] +json table yaml +``` + +#### Debugging + +You can also easily debug your Go completion code for flags: +```bash +$ helm __complete status --output "" +json +table +yaml +:4 +Completion ended with directive: ShellCompDirectiveNoFileComp # This is on stderr +``` +***Important:*** You should **not** leave traces that print to stdout in your completion code as they will be interpreted as completion choices by the completion script. Instead, use the cobra-provided debugging traces functions mentioned further above. 
+ +### Specify valid filename extensions for flags that take a filename + +To limit completions of flag values to file names with certain extensions you can either use the different `MarkFlagFilename()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterFileExt`, like so: +```go +flagName := "output" +cmd.MarkFlagFilename(flagName, "yaml", "json") +``` +or +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"yaml", "json"}, ShellCompDirectiveFilterFileExt}) +``` + +### Limit flag completions to directory names + +To limit completions of flag values to directory names you can either use the `MarkFlagDirname()` functions or a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs`, like so: +```go +flagName := "output" +cmd.MarkFlagDirname(flagName) +``` +or +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveFilterDirs +}) +``` +To limit completions of flag values to directory names *within another directory* you can use a combination of `RegisterFlagCompletionFunc()` and `ShellCompDirectiveFilterDirs` like so: +```go +flagName := "output" +cmd.RegisterFlagCompletionFunc(flagName, func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"themes"}, cobra.ShellCompDirectiveFilterDirs +}) +``` +### Descriptions for completions + +Cobra provides support for completion descriptions. Such descriptions are supported for each shell +(however, for bash, it is only available in the [completion V2 version](#bash-completion-v2)). +For commands and flags, Cobra will provide the descriptions automatically, based on usage information. 
+For example, using zsh: +``` +$ helm s[tab] +search -- search for a keyword in charts +show -- show information of a chart +status -- displays the status of the named release +``` +while using fish: +``` +$ helm s[tab] +search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) +``` + +Cobra allows you to add descriptions to your own completions. Simply add the description text after each completion, following a `\t` separator. This technique applies to completions returned by `ValidArgs`, `ValidArgsFunction` and `RegisterFlagCompletionFunc()`. For example: +```go +ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return []string{"harbor\tAn image registry", "thanos\tLong-term metrics"}, cobra.ShellCompDirectiveNoFileComp +}} +``` +or +```go +ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"} +``` +## Bash completions + +### Dependencies + +The bash completion script generated by Cobra requires the `bash_completion` package. You should update the help text of your completion command to show how to install the `bash_completion` package ([Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion)) + +### Aliases + +You can also configure `bash` aliases for your program and they will also support completions. + +```bash +alias aliasname=origcommand +complete -o default -F __start_origcommand aliasname + +# and now when you run `aliasname` completion will make +# suggestions as it did for `origcommand`. + +$ aliasname +completion firstcommand secondcommand +``` +### Bash legacy dynamic completions + +For backward compatibility, Cobra still supports its bash legacy dynamic completion solution. +Please refer to [Bash Completions](bash_completions.md) for details. + +### Bash completion V2 + +Cobra provides two versions for bash completion. 
The original bash completion (which started it all!) can be used by calling +`GenBashCompletion()` or `GenBashCompletionFile()`. + +A new V2 bash completion version is also available. This version can be used by calling `GenBashCompletionV2()` or +`GenBashCompletionFileV2()`. The V2 version does **not** support the legacy dynamic completion +(see [Bash Completions](bash_completions.md)) but instead works only with the Go dynamic completion +solution described in this document. +Unless your program already uses the legacy dynamic completion solution, it is recommended that you use the bash +completion V2 solution which provides the following extra features: +- Supports completion descriptions (like the other shells) +- Small completion script of less than 300 lines (v1 generates scripts of thousands of lines; `kubectl` for example has a bash v1 completion script of over 13K lines) +- Streamlined user experience thanks to a completion behavior aligned with the other shells + +`Bash` completion V2 supports descriptions for completions. When calling `GenBashCompletionV2()` or `GenBashCompletionFileV2()` +you must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra +will provide the description automatically based on usage information. You can choose to make this option configurable by +your users. + +``` +# With descriptions +$ helm s[tab][tab] +search (search for a keyword in charts) status (display the status of the named release) +show (show information of a chart) + +# Without descriptions +$ helm s[tab][tab] +search show status +``` +**Note**: Cobra's default `completion` command uses bash completion V2. If for some reason you need to use bash completion V1, you will need to implement your own `completion` command. +## Zsh completions + +Cobra supports native zsh completion generated from the root `cobra.Command`. 
+The generated completion script should be put somewhere in your `$fpath` and be named
+`_<programName>`. You will need to start a new shell for the completions to become available.
+
+Zsh supports descriptions for completions. Cobra will provide the description automatically,
+based on usage information. Cobra provides a way to completely disable such descriptions by
+using `GenZshCompletionNoDesc()` or `GenZshCompletionFileNoDesc()`. You can choose to make
+this a configurable option to your users.
+```
+# With descriptions
+$ helm s[tab]
+search -- search for a keyword in charts
+show -- show information of a chart
+status -- displays the status of the named release
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`.
+
+### Limitations
+
+* Custom completions implemented in Bash scripting (legacy) are not supported and will be ignored for `zsh` (including the use of the `BashCompCustom` flag annotation).
+  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `zsh`.
+  * You should instead use `RegisterFlagCompletionFunc()`.
+
+### Zsh completions standardization
+
+Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backward-compatible, some small changes in behavior were introduced.
+Please refer to [Zsh Completions](zsh_completions.md) for details.
+
+## fish completions
+
+Cobra supports native fish completions generated from the root `cobra.Command`. You can use the `command.GenFishCompletion()` or `command.GenFishCompletionFile()` functions.
You must provide these functions with a parameter indicating if the completions should be annotated with a description; Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users. +``` +# With descriptions +$ helm s[tab] +search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release) + +# Without descriptions +$ helm s[tab] +search show status +``` +*Note*: Because of backward-compatibility requirements, we were forced to have a different API to disable completion descriptions between `zsh` and `fish`. + +### Limitations + +* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `fish` (including the use of the `BashCompCustom` flag annotation). + * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`). +* The function `MarkFlagCustom()` is not supported and will be ignored for `fish`. + * You should instead use `RegisterFlagCompletionFunc()`. +* The following flag completion annotations are not supported and will be ignored for `fish`: + * `BashCompFilenameExt` (filtering by file extension) + * `BashCompSubdirsInDir` (filtering by directory) +* The functions corresponding to the above annotations are consequently not supported and will be ignored for `fish`: + * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) + * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) +* Similarly, the following completion directives are not supported and will be ignored for `fish`: + * `ShellCompDirectiveFilterFileExt` (filtering by file extension) + * `ShellCompDirectiveFilterDirs` (filtering by directory) + +## PowerShell completions + +Cobra supports native PowerShell completions generated from the root `cobra.Command`. 
You can use the `command.GenPowerShellCompletion()` or `command.GenPowerShellCompletionFile()` functions. To include descriptions use `command.GenPowerShellCompletionWithDesc()` and `command.GenPowerShellCompletionFileWithDesc()`. Cobra will provide the description automatically based on usage information. You can choose to make this option configurable by your users.
+
+The script is designed to support all three PowerShell completion modes:
+
+* TabCompleteNext (default windows style - on each key press the next option is displayed)
+* Complete (works like bash)
+* MenuComplete (works like zsh)
+
+You set the mode with `Set-PSReadLineKeyHandler -Key Tab -Function <mode>`. Descriptions are only displayed when using the `Complete` or `MenuComplete` mode.
+
+Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$Profile` environment variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
+
+```
+# With descriptions and Mode 'Complete'
+$ helm s[tab]
+search (search for a keyword in charts) show (show information of a chart) status (displays the status of the named release)
+
+# With descriptions and Mode 'MenuComplete' The description of the current selected value will be displayed below the suggestions.
+$ helm s[tab]
+search show status
+
+search for a keyword in charts
+
+# Without descriptions
+$ helm s[tab]
+search show status
+```
+
+### Limitations
+
+* Custom completions implemented in bash scripting (legacy) are not supported and will be ignored for `powershell` (including the use of the `BashCompCustom` flag annotation).
+  * You should instead use `ValidArgsFunction` and `RegisterFlagCompletionFunc()` which are portable to the different shells (`bash`, `zsh`, `fish`, `powershell`).
+* The function `MarkFlagCustom()` is not supported and will be ignored for `powershell`. + * You should instead use `RegisterFlagCompletionFunc()`. +* The following flag completion annotations are not supported and will be ignored for `powershell`: + * `BashCompFilenameExt` (filtering by file extension) + * `BashCompSubdirsInDir` (filtering by directory) +* The functions corresponding to the above annotations are consequently not supported and will be ignored for `powershell`: + * `MarkFlagFilename()` and `MarkPersistentFlagFilename()` (filtering by file extension) + * `MarkFlagDirname()` and `MarkPersistentFlagDirname()` (filtering by directory) +* Similarly, the following completion directives are not supported and will be ignored for `powershell`: + * `ShellCompDirectiveFilterFileExt` (filtering by file extension) + * `ShellCompDirectiveFilterDirs` (filtering by directory) diff --git a/mantle/vendor/github.com/spf13/cobra/user_guide.md b/mantle/vendor/github.com/spf13/cobra/user_guide.md new file mode 100644 index 00000000..5a7acf88 --- /dev/null +++ b/mantle/vendor/github.com/spf13/cobra/user_guide.md @@ -0,0 +1,666 @@ +# User Guide + +While you are welcome to provide your own organization, typically a Cobra-based +application will follow the following organizational structure: + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +## Using the Cobra Generator + +Cobra-CLI is its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. 
+ +For complete details on using the Cobra generator, please refer to [The Cobra-CLI Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) + +## Using the Cobra Library + +To manually implement Cobra you need to create a bare main.go file and a rootCmd file. +You will optionally provide additional commands as you see fit. + +### Create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var rootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at https://gohugo.io/documentation/`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} +``` + +You will additionally define flags and handle configuration in your init() function. + +For example cmd/root.go: + +```go +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var ( + // Used for flags. + cfgFile string + userLicense string + + rootCmd = &cobra.Command{ + Use: "cobra-cli", + Short: "A generator for Cobra based Applications", + Long: `Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + } +) + +// Execute executes the root command. 
+func Execute() error { + return rootCmd.Execute() +} + +func init() { + cobra.OnInitialize(initConfig) + + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "name of license for the project") + rootCmd.PersistentFlags().Bool("viper", true, "use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") + + rootCmd.AddCommand(addCmd) + rootCmd.AddCommand(initCmd) +} + +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := os.UserHomeDir() + cobra.CheckErr(err) + + // Search config in home directory with name ".cobra" (without extension). + viper.AddConfigPath(home) + viper.SetConfigType("yaml") + viper.SetConfigName(".cobra") + } + + viper.AutomaticEnv() + + if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra. + +```go +package main + +import ( + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. 
+ +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Returning and handling errors + +If you wish to return an error to the caller of a command, `RunE` can be used. + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(tryCmd) +} + +var tryCmd = &cobra.Command{ + Use: "try", + Short: "Try and possibly fail at something", + RunE: func(cmd *cobra.Command, args []string) error { + if err := someFunc(); err != nil { + return err + } + return nil + }, +} +``` + +The error can then be caught at the execute function call. + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent', meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally, which will only apply to that specific command. 
+ +```go +localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + +### Local Flag on Parent Commands + +By default, Cobra only parses local flags on the target command, and any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will +parse local flags on each command before executing the target command. + +```go +command := cobra.Command{ + Use: "print [OPTIONS] [COMMANDS]", + TraverseChildren: true, +} +``` + +### Bind Flags with Config + +You can also bind your flags with [viper](https://github.com/spf13/viper): +```go +var author string + +func init() { + rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) +} +``` + +In this example, the persistent flag `author` is bound with `viper`. +**Note**: the variable `author` will not be set to the value from config, +when the `--author` flag is provided by user. + +More in [viper documentation](https://github.com/spf13/viper#working-with-flags). + +### Required flags + +Flags are optional by default. If instead you wish your command to report an error +when a flag has not been set, mark it as required: +```go +rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkFlagRequired("region") +``` + +Or, for persistent flags: +```go +rootCmd.PersistentFlags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkPersistentFlagRequired("region") +``` + +### Flag Groups + +If you have different flags that must be provided together (e.g. 
if they provide the `--username` flag they MUST provide the `--password` flag as well) then +Cobra can enforce that requirement: +```go +rootCmd.Flags().StringVarP(&u, "username", "u", "", "Username (required if password is set)") +rootCmd.Flags().StringVarP(&pw, "password", "p", "", "Password (required if username is set)") +rootCmd.MarkFlagsRequiredTogether("username", "password") +``` + +You can also prevent different flags from being provided together if they represent mutually +exclusive options such as specifying an output format as either `--json` or `--yaml` but never both: +```go +rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON") +rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML") +rootCmd.MarkFlagsMutuallyExclusive("json", "yaml") +``` + +In both of these cases: + - both local and persistent flags can be used + - **NOTE:** the group is only enforced on commands where every flag is defined + - a flag may appear in multiple groups + - a group may contain any number of flags + +## Positional and Custom Arguments + +Validation of positional arguments can be specified using the `Args` field of `Command`. +If `Args` is undefined or `nil`, it defaults to `ArbitraryArgs`. + +The following validators are built in: + +- `NoArgs` - the command will report an error if there are any positional args. +- `ArbitraryArgs` - the command will accept any args. +- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. +- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. +- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. +- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. 
+- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command` +- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. +- `MatchAll(pargs ...PositionalArgs)` - enables combining existing checks with arbitrary other checks (e.g. you want to check the ExactArgs length along with other qualities). + +An example of setting the custom validator: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("requires a color argument") + } + if myapp.IsValidColor(args[0]) { + return nil + } + return fmt.Errorf("invalid color specified: %s", args[0]) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable, meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. 
+For many years people have printed back to the screen.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. +Echo works a lot like print, except it has a child command.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Echo: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing +a count and a string.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](https://gohugo.io/). + +## Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + $ cobra help + + Cobra is a CLI library for Go that empowers applications. 
+ This application is a tool to generate the needed files + to quickly create a Cobra application. + + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use +with following functions: + +```go +cmd.SetHelpCommand(cmd *Command) +cmd.SetHelpFunc(f func(*Command, []string)) +cmd.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage Message + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + $ cobra --invalid + Error: unknown flag: --invalid + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. 
+ +### Defining your own usage +You can provide your own usage function or template for Cobra to use. +Like help, the function and template are overridable through public methods: + +```go +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) +``` + +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + +## PreRun and PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + rootCmd.Execute() + fmt.Println() + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + rootCmd.Execute() +} +``` + +Output: +``` +Inside rootCmd PersistentPreRun with args: [] +Inside rootCmd PreRun with args: [] +Inside rootCmd Run with args: [] +Inside rootCmd PostRun with args: [] +Inside rootCmd PersistentPostRun with args: [] + +Inside rootCmd PersistentPreRun with args: [arg1 arg2] +Inside subCmd PreRun with args: 
[arg1 arg2] +Inside subCmd Run with args: [arg1 arg2] +Inside subCmd PostRun with args: [arg1 arg2] +Inside subCmd PersistentPostRun with args: [arg1 arg2] +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. + +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating documentation for your command + +Cobra can generate documentation based on subcommands, flags, etc. Read more about it in the [docs generation documentation](doc/README.md). + +## Generating shell completions + +Cobra can generate a shell-completion file for the following shells: bash, zsh, fish, PowerShell. If you add more information to your commands, these completions can be amazingly powerful and flexible. Read more about it in [Shell Completions](shell_completions.md). 
+ +## Providing Active Help + +Cobra makes use of the shell-completion system to define a framework allowing you to provide Active Help to your users. Active Help are messages (hints, warnings, etc) printed as the program is being used. Read more about it in [Active Help](active_help.md). diff --git a/mantle/vendor/github.com/spf13/cobra/zsh_completions.go b/mantle/vendor/github.com/spf13/cobra/zsh_completions.go index 12755482..65cd94c6 100644 --- a/mantle/vendor/github.com/spf13/cobra/zsh_completions.go +++ b/mantle/vendor/github.com/spf13/cobra/zsh_completions.go @@ -1,336 +1,287 @@ package cobra import ( - "encoding/json" + "bytes" "fmt" "io" "os" - "sort" - "strings" - "text/template" - - "github.com/spf13/pflag" -) - -const ( - zshCompArgumentAnnotation = "cobra_annotations_zsh_completion_argument_annotation" - zshCompArgumentFilenameComp = "cobra_annotations_zsh_completion_argument_file_completion" - zshCompArgumentWordComp = "cobra_annotations_zsh_completion_argument_word_completion" - zshCompDirname = "cobra_annotations_zsh_dirname" -) - -var ( - zshCompFuncMap = template.FuncMap{ - "genZshFuncName": zshCompGenFuncName, - "extractFlags": zshCompExtractFlag, - "genFlagEntryForZshArguments": zshCompGenFlagEntryForArguments, - "extractArgsCompletions": zshCompExtractArgumentCompletionHintsForRendering, - } - zshCompletionText = ` -{{/* should accept Command (that contains subcommands) as parameter */}} -{{define "argumentsC" -}} -{{ $cmdPath := genZshFuncName .}} -function {{$cmdPath}} { - local -a commands - - _arguments -C \{{- range extractFlags .}} - {{genFlagEntryForZshArguments .}} \{{- end}} - "1: :->cmnds" \ - "*::arg:->args" - - case $state in - cmnds) - commands=({{range .Commands}}{{if not .Hidden}} - "{{.Name}}:{{.Short}}"{{end}}{{end}} - ) - _describe "command" commands - ;; - esac - - case "$words[1]" in {{- range .Commands}}{{if not .Hidden}} - {{.Name}}) - {{$cmdPath}}_{{.Name}} - ;;{{end}}{{end}} - esac -} -{{range .Commands}}{{if not 
.Hidden}} -{{template "selectCmdTemplate" .}} -{{- end}}{{end}} -{{- end}} - -{{/* should accept Command without subcommands as parameter */}} -{{define "arguments" -}} -function {{genZshFuncName .}} { -{{" _arguments"}}{{range extractFlags .}} \ - {{genFlagEntryForZshArguments . -}} -{{end}}{{range extractArgsCompletions .}} \ - {{.}}{{end}} -} -{{end}} - -{{/* dispatcher for commands with or without subcommands */}} -{{define "selectCmdTemplate" -}} -{{if .Hidden}}{{/* ignore hidden*/}}{{else -}} -{{if .Commands}}{{template "argumentsC" .}}{{else}}{{template "arguments" .}}{{end}} -{{- end}} -{{- end}} - -{{/* template entry point */}} -{{define "Main" -}} -#compdef _{{.Name}} {{.Name}} - -{{template "selectCmdTemplate" .}} -{{end}} -` ) -// zshCompArgsAnnotation is used to encode/decode zsh completion for -// arguments to/from Command.Annotations. -type zshCompArgsAnnotation map[int]zshCompArgHint - -type zshCompArgHint struct { - // Indicates the type of the completion to use. One of: - // zshCompArgumentFilenameComp or zshCompArgumentWordComp - Tipe string `json:"type"` - - // A value for the type above (globs for file completion or words) - Options []string `json:"options"` -} - -// GenZshCompletionFile generates zsh completion file. +// GenZshCompletionFile generates zsh completion file including descriptions. func (c *Command) GenZshCompletionFile(filename string) error { - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - return c.GenZshCompletion(outFile) + return c.genZshCompletionFile(filename, true) } -// GenZshCompletion generates a zsh completion file and writes to the passed -// writer. The completion always run on the root command regardless of the -// command it was called from. +// GenZshCompletion generates zsh completion file including descriptions +// and writes it to the passed writer. 
func (c *Command) GenZshCompletion(w io.Writer) error { - tmpl, err := template.New("Main").Funcs(zshCompFuncMap).Parse(zshCompletionText) - if err != nil { - return fmt.Errorf("error creating zsh completion template: %v", err) - } - return tmpl.Execute(w, c.Root()) + return c.genZshCompletion(w, true) } -// MarkZshCompPositionalArgumentFile marks the specified argument (first -// argument is 1) as completed by file selection. patterns (e.g. "*.txt") are -// optional - if not provided the completion will search for all files. -func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { - if argPosition < 1 { - return fmt.Errorf("Invalid argument position (%d)", argPosition) - } - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return err - } - if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { - return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) - } - annotation[argPosition] = zshCompArgHint{ - Tipe: zshCompArgumentFilenameComp, - Options: patterns, - } - return c.zshCompSetArgsAnnotations(annotation) +// GenZshCompletionFileNoDesc generates zsh completion file without descriptions. +func (c *Command) GenZshCompletionFileNoDesc(filename string) error { + return c.genZshCompletionFile(filename, false) } -// MarkZshCompPositionalArgumentWords marks the specified positional argument -// (first argument is 1) as completed by the provided words. At east one word -// must be provided, spaces within words will be offered completion with -// "word\ word". 
-func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { - if argPosition < 1 { - return fmt.Errorf("Invalid argument position (%d)", argPosition) - } - if len(words) == 0 { - return fmt.Errorf("Trying to set empty word list for positional argument %d", argPosition) - } - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return err - } - if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) { - return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition) - } - annotation[argPosition] = zshCompArgHint{ - Tipe: zshCompArgumentWordComp, - Options: words, - } - return c.zshCompSetArgsAnnotations(annotation) +// GenZshCompletionNoDesc generates zsh completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenZshCompletionNoDesc(w io.Writer) error { + return c.genZshCompletion(w, false) } -func zshCompExtractArgumentCompletionHintsForRendering(c *Command) ([]string, error) { - var result []string - annotation, err := c.zshCompGetArgsAnnotations() - if err != nil { - return nil, err - } - for k, v := range annotation { - s, err := zshCompRenderZshCompArgHint(k, v) - if err != nil { - return nil, err - } - result = append(result, s) - } - if len(c.ValidArgs) > 0 { - if _, positionOneExists := annotation[1]; !positionOneExists { - s, err := zshCompRenderZshCompArgHint(1, zshCompArgHint{ - Tipe: zshCompArgumentWordComp, - Options: c.ValidArgs, - }) - if err != nil { - return nil, err - } - result = append(result, s) - } - } - sort.Strings(result) - return result, nil -} - -func zshCompRenderZshCompArgHint(i int, z zshCompArgHint) (string, error) { - switch t := z.Tipe; t { - case zshCompArgumentFilenameComp: - var globs []string - for _, g := range z.Options { - globs = append(globs, fmt.Sprintf(`-g "%s"`, g)) - } - return fmt.Sprintf(`'%d: :_files %s'`, i, strings.Join(globs, " ")), nil - case zshCompArgumentWordComp: - var words 
[]string - for _, w := range z.Options { - words = append(words, fmt.Sprintf("%q", w)) - } - return fmt.Sprintf(`'%d: :(%s)'`, i, strings.Join(words, " ")), nil - default: - return "", fmt.Errorf("Invalid zsh argument completion annotation: %s", t) - } -} - -func (c *Command) zshcompArgsAnnotationnIsDuplicatePosition(annotation zshCompArgsAnnotation, position int) bool { - _, dup := annotation[position] - return dup -} - -func (c *Command) zshCompGetArgsAnnotations() (zshCompArgsAnnotation, error) { - annotation := make(zshCompArgsAnnotation) - annotationString, ok := c.Annotations[zshCompArgumentAnnotation] - if !ok { - return annotation, nil - } - err := json.Unmarshal([]byte(annotationString), &annotation) - if err != nil { - return annotation, fmt.Errorf("Error unmarshaling zsh argument annotation: %v", err) - } - return annotation, nil -} - -func (c *Command) zshCompSetArgsAnnotations(annotation zshCompArgsAnnotation) error { - jsn, err := json.Marshal(annotation) - if err != nil { - return fmt.Errorf("Error marshaling zsh argument annotation: %v", err) - } - if c.Annotations == nil { - c.Annotations = make(map[string]string) - } - c.Annotations[zshCompArgumentAnnotation] = string(jsn) +// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was +// not consistent with Bash completion. It has therefore been disabled. +// Instead, when no other completion is specified, file completion is done by +// default for every argument. One can disable file completion on a per-argument +// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. +// To achieve file extension filtering, one can use ValidArgsFunction and +// ShellCompDirectiveFilterFileExt. 
+// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { return nil } -func zshCompGenFuncName(c *Command) string { - if c.HasParent() { - return zshCompGenFuncName(c.Parent()) + "_" + c.Name() - } - return "_" + c.Name() -} - -func zshCompExtractFlag(c *Command) []*pflag.Flag { - var flags []*pflag.Flag - c.LocalFlags().VisitAll(func(f *pflag.Flag) { - if !f.Hidden { - flags = append(flags, f) - } - }) - c.InheritedFlags().VisitAll(func(f *pflag.Flag) { - if !f.Hidden { - flags = append(flags, f) - } - }) - return flags -} - -// zshCompGenFlagEntryForArguments returns an entry that matches _arguments -// zsh-completion parameters. It's too complicated to generate in a template. -func zshCompGenFlagEntryForArguments(f *pflag.Flag) string { - if f.Name == "" || f.Shorthand == "" { - return zshCompGenFlagEntryForSingleOptionFlag(f) - } - return zshCompGenFlagEntryForMultiOptionFlag(f) +// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore +// been disabled. +// To achieve the same behavior across all shells, one can use +// ValidArgs (for the first argument only) or ValidArgsFunction for +// any argument (can include the first one also). 
+// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { + return nil } -func zshCompGenFlagEntryForSingleOptionFlag(f *pflag.Flag) string { - var option, multiMark, extras string - - if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) { - multiMark = "*" - } - - option = "--" + f.Name - if option == "--" { - option = "-" + f.Shorthand +func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err } - extras = zshCompGenFlagEntryExtras(f) + defer outFile.Close() - return fmt.Sprintf(`'%s%s[%s]%s'`, multiMark, option, zshCompQuoteFlagDescription(f.Usage), extras) + return c.genZshCompletion(outFile, includeDesc) } -func zshCompGenFlagEntryForMultiOptionFlag(f *pflag.Flag) string { - var options, parenMultiMark, curlyMultiMark, extras string - - if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) { - parenMultiMark = "*" - curlyMultiMark = "\\*" - } - - options = fmt.Sprintf(`'(%s-%s %s--%s)'{%s-%s,%s--%s}`, - parenMultiMark, f.Shorthand, parenMultiMark, f.Name, curlyMultiMark, f.Shorthand, curlyMultiMark, f.Name) - extras = zshCompGenFlagEntryExtras(f) - - return fmt.Sprintf(`%s'[%s]%s'`, options, zshCompQuoteFlagDescription(f.Usage), extras) +func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genZshComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err } -func zshCompGenFlagEntryExtras(f *pflag.Flag) string { - if f.NoOptDefVal != "" { - return "" +func genZshComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd } + WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s - extras := ":" // allow options for flag (even without assistance) - for key, values := range f.Annotations { - switch key { - case zshCompDirname: - extras = fmt.Sprintf(":filename:_files -g %q", 
values[0]) - case BashCompFilenameExt: - extras = ":filename:_files" - for _, pattern := range values { - extras = extras + fmt.Sprintf(` -g "%s"`, pattern) - } - } - } +# zsh completion for %-36[1]s -*- shell-script -*- - return extras +__%[1]s_debug() +{ + local file="$BASH_COMP_DEBUG_FILE" + if [[ -n ${file} ]]; then + echo "$*" >> "${file}" + fi } -func zshCompFlagCouldBeSpecifiedMoreThenOnce(f *pflag.Flag) bool { - return strings.Contains(f.Value.Type(), "Slice") || - strings.Contains(f.Value.Type(), "Array") +_%[1]s() +{ + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace + local -a completions + + __%[1]s_debug "\n========= starting completion logic ==========" + __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CURRENT location, so we need + # to truncate the command-line ($words) up to the $CURRENT location. + # (We cannot use $CURSOR as its value does not work when a command is an alias.) 
+ words=("${=words[1,CURRENT]}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + lastParam=${words[-1]} + lastChar=${lastParam[-1]} + __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" + + # For zsh, when completing a flag with an = (e.g., %[1]s -n=) + # completions must be prefixed with the flag + setopt local_options BASH_REMATCH + if [[ "${lastParam}" =~ '-.*=' ]]; then + # We are dealing with a flag with an = + flagPrefix="-P ${BASH_REMATCH}" + fi + + # Prepare the command to obtain completions + requestComp="${words[1]} %[2]s ${words[2,-1]}" + if [ "${lastChar}" = "" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go completion code. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi + + __%[1]s_debug "About to call: eval ${requestComp}" + + # Use eval to handle any environment variables and such + out=$(eval ${requestComp} 2>/dev/null) + __%[1]s_debug "completion output: ${out}" + + # Extract the directive integer following a : from the last line + local lastLine + while IFS='\n' read -r line; do + lastLine=${line} + done < <(printf "%%s\n" "${out[@]}") + __%[1]s_debug "last line: ${lastLine}" + + if [ "${lastLine[1]}" = : ]; then + directive=${lastLine[2,-1]} + # Remove the directive including the : and the newline + local suffix + (( suffix=${#lastLine}+2)) + out=${out[1,-$suffix]} + else + # There is no directive specified. Leave $out as is. + __%[1]s_debug "No directive found. Setting do default" + directive=0 + fi + + __%[1]s_debug "directive: ${directive}" + __%[1]s_debug "completions: ${out}" + __%[1]s_debug "flagPrefix: ${flagPrefix}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + __%[1]s_debug "Completion received error. Ignoring completions." 
+ return + fi + + local activeHelpMarker="%[8]s" + local endIndex=${#activeHelpMarker} + local startIndex=$((${#activeHelpMarker}+1)) + local hasActiveHelp=0 + while IFS='\n' read -r comp; do + # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker) + if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then + __%[1]s_debug "ActiveHelp found: $comp" + comp="${comp[$startIndex,-1]}" + if [ -n "$comp" ]; then + compadd -x "${comp}" + __%[1]s_debug "ActiveHelp will need delimiter" + hasActiveHelp=1 + fi + + continue + fi + + if [ -n "$comp" ]; then + # If requested, completions are returned with a description. + # The description is preceded by a TAB character. + # For zsh's _describe, we need to use a : instead of a TAB. + # We first need to escape any : as part of the completion itself. + comp=${comp//:/\\:} + + local tab="$(printf '\t')" + comp=${comp//$tab/:} + + __%[1]s_debug "Adding completion: ${comp}" + completions+=${comp} + lastComp=$comp + fi + done < <(printf "%%s\n" "${out[@]}") + + # Add a delimiter after the activeHelp statements, but only if: + # - there are completions following the activeHelp statements, or + # - file completion will be performed (so there will be choices after the activeHelp) + if [ $hasActiveHelp -eq 1 ]; then + if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then + __%[1]s_debug "Adding activeHelp delimiter" + compadd -x "--" + hasActiveHelp=0 + fi + fi + + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + __%[1]s_debug "Activating nospace." 
+ noSpace="-S ''" + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local filteringCmd + filteringCmd='_files' + for filter in ${completions[@]}; do + if [ ${filter[1]} != '*' ]; then + # zsh requires a glob pattern to do file filtering + filter="\*.$filter" + fi + filteringCmd+=" -g $filter" + done + filteringCmd+=" ${flagPrefix}" + + __%[1]s_debug "File filtering command: $filteringCmd" + _arguments '*:filename:'"$filteringCmd" + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subdir + subdir="${completions[1]}" + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "${subdir}" >/dev/null 2>&1 + else + __%[1]s_debug "Listing directories in ." + fi + + local result + _arguments '*:dirname:_files -/'" ${flagPrefix}" + result=$? + if [ -n "$subdir" ]; then + popd >/dev/null 2>&1 + fi + return $result + else + __%[1]s_debug "Calling _describe" + if eval _describe "completions" completions $flagPrefix $noSpace; then + __%[1]s_debug "_describe found some completions" + + # Return the success of having called _describe + return 0 + else + __%[1]s_debug "_describe did not find completions." + __%[1]s_debug "Checking if we should do file completion." + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + __%[1]s_debug "deactivating file completion" + + # We must return an error code here to let zsh know that there were no + # completions found by _describe; this is what will trigger other + # matching algorithms to attempt to find completions. + # For example zsh can match letters in the middle of words. + return 1 + else + # Perform file completion + __%[1]s_debug "Activating file completion" + + # We must return the result of this command, so it must be the + # last command, or else we must store its result to return it. 
+ _arguments '*:filename:_files'" ${flagPrefix}" + fi + fi + fi } -func zshCompQuoteFlagDescription(s string) string { - return strings.Replace(s, "'", `'\''`, -1) +# don't run the completion function when being source-ed or eval-ed +if [ "$funcstack[1]" = "_%[1]s" ]; then + _%[1]s +fi +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, + activeHelpMarker)) } diff --git a/mantle/vendor/github.com/spf13/cobra/zsh_completions.md b/mantle/vendor/github.com/spf13/cobra/zsh_completions.md index df9c2eac..7cff6178 100644 --- a/mantle/vendor/github.com/spf13/cobra/zsh_completions.md +++ b/mantle/vendor/github.com/spf13/cobra/zsh_completions.md @@ -1,39 +1,48 @@ -## Generating Zsh Completion for your cobra.Command - -Cobra supports native Zsh completion generated from the root `cobra.Command`. -The generated completion script should be put somewhere in your `$fpath` named -`_`. - -### What's Supported - -* Completion for all non-hidden subcommands using their `.Short` description. -* Completion for all non-hidden flags using the following rules: - * Filename completion works by marking the flag with `cmd.MarkFlagFilename...` - family of commands. - * The requirement for argument to the flag is decided by the `.NoOptDefVal` - flag value - if it's empty then completion will expect an argument. - * Flags of one of the various `*Array` and `*Slice` types supports multiple - specifications (with or without argument depending on the specific type). -* Completion of positional arguments using the following rules: - * Argument position for all options below starts at `1`. If argument position - `0` is requested it will raise an error. - * Use `command.MarkZshCompPositionalArgumentFile` to complete filenames. Glob - patterns (e.g. `"*.log"`) are optional - if not specified it will offer to - complete all file types. 
- * Use `command.MarkZshCompPositionalArgumentWords` to offer specific words for - completion. At least one word is required. - * It's possible to specify completion for some arguments and leave some - unspecified (e.g. offer words for second argument but nothing for first - argument). This will cause no completion for first argument but words - completion for second argument. - * If no argument completion was specified for 1st argument (but optionally was - specified for 2nd) and the command has `ValidArgs` it will be used as - completion options for 1st argument. - * Argument completions only offered for commands with no subcommands. - -### What's not yet Supported - -* Custom completion scripts are not supported yet (We should probably create zsh - specific one, doesn't make sense to re-use the bash one as the functions will - be different). -* Whatever other feature you're looking for and doesn't exist :) +## Generating Zsh Completion For Your cobra.Command + +Please refer to [Shell Completions](shell_completions.md) for details. + +## Zsh completions standardization + +Cobra 1.1 standardized its zsh completion support to align it with its other shell completions. Although the API was kept backwards-compatible, some small changes in behavior were introduced. + +### Deprecation summary + +See further below for more details on these deprecations. + +* `cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` is no longer needed. It is therefore **deprecated** and silently ignored. +* `cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` is **deprecated** and silently ignored. + * Instead use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt`. +* `cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored. + * Instead use `ValidArgsFunction`. 
+ +### Behavioral changes + +**Noun completion** +|Old behavior|New behavior| +|---|---| +|No file completion by default (opposite of bash)|File completion by default; use `ValidArgsFunction` with `ShellCompDirectiveNoFileComp` to turn off file completion on a per-argument basis| +|Completion of flag names without the `-` prefix having been typed|Flag names are only completed if the user has typed the first `-`| +`cmd.MarkZshCompPositionalArgumentFile(pos, []string{})` used to turn on file completion on a per-argument position basis|File completion for all arguments by default; `cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored| +|`cmd.MarkZshCompPositionalArgumentFile(pos, glob[])` used to turn on file completion **with glob filtering** on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentFile()` is **deprecated** and silently ignored; use `ValidArgsFunction` with `ShellCompDirectiveFilterFileExt` for file **extension** filtering (not full glob filtering)| +|`cmd.MarkZshCompPositionalArgumentWords(pos, words[])` used to provide completion choices on a per-argument position basis (zsh-specific)|`cmd.MarkZshCompPositionalArgumentWords()` is **deprecated** and silently ignored; use `ValidArgsFunction` to achieve the same behavior| + +**Flag-value completion** + +|Old behavior|New behavior| +|---|---| +|No file completion by default (opposite of bash)|File completion by default; use `RegisterFlagCompletionFunc()` with `ShellCompDirectiveNoFileComp` to turn off file completion| +|`cmd.MarkFlagFilename(flag, []string{})` and similar used to turn on file completion|File completion by default; `cmd.MarkFlagFilename(flag, []string{})` no longer needed in this context and silently ignored| +|`cmd.MarkFlagFilename(flag, glob[])` used to turn on file completion **with glob filtering** (syntax of `[]string{"*.yaml", "*.yml"}` incompatible with bash)|Will continue to work, however, support for bash syntax is added and 
should be used instead so as to work for all shells (`[]string{"yaml", "yml"}`)| +|`cmd.MarkFlagDirname(flag)` only completes directories (zsh-specific)|Has been added for all shells| +|Completion of a flag name does not repeat, unless flag is of type `*Array` or `*Slice` (not supported by bash)|Retained for `zsh` and added to `fish`| +|Completion of a flag name does not provide the `=` form (unlike bash)|Retained for `zsh` and added to `fish`| + +**Improvements** + +* Custom completion support (`ValidArgsFunction` and `RegisterFlagCompletionFunc()`) +* File completion by default if no other completions found +* Handling of required flags +* File extension filtering no longer mutually exclusive with bash usage +* Completion of directory names *within* another directory +* Support for `=` form of flags diff --git a/mantle/vendor/github.com/spf13/pflag/go.mod b/mantle/vendor/github.com/spf13/pflag/go.mod deleted file mode 100644 index b2287eec..00000000 --- a/mantle/vendor/github.com/spf13/pflag/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/spf13/pflag - -go 1.12 diff --git a/mantle/vendor/github.com/spf13/pflag/go.sum b/mantle/vendor/github.com/spf13/pflag/go.sum deleted file mode 100644 index e69de29b..00000000 diff --git a/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 41649d26..95d8e59d 100644 --- a/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -1,8 +1,10 @@ package assert import ( + "bytes" "fmt" "reflect" + "time" ) type CompareType int @@ -30,6 +32,9 @@ var ( float64Type = reflect.TypeOf(float64(1)) stringType = reflect.TypeOf("") + + timeType = reflect.TypeOf(time.Time{}) + bytesType = reflect.TypeOf([]byte{}) ) func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { @@ -299,6 +304,47 @@ func compare(obj1, obj2 interface{}, 
kind reflect.Kind) (CompareType, bool) { return compareLess, true } } + // Check for known struct types we can check for compare results. + case reflect.Struct: + { + // All structs enter here. We're not interested in most types. + if !canConvert(obj1Value, timeType) { + break + } + + // time.Time can compared! + timeObj1, ok := obj1.(time.Time) + if !ok { + timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) + } + + timeObj2, ok := obj2.(time.Time) + if !ok { + timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) + } + + return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + } + case reflect.Slice: + { + // We only care about the []byte type. + if !canConvert(obj1Value, bytesType) { + break + } + + // []byte can be compared! + bytesObj1, ok := obj1.([]byte) + if !ok { + bytesObj1 = obj1Value.Convert(bytesType).Interface().([]byte) + + } + bytesObj2, ok := obj2.([]byte) + if !ok { + bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) + } + + return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + } } return compareEqual, false @@ -310,7 +356,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) 
} // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -320,7 +369,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -329,7 +381,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -339,7 +394,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
} // Positive asserts that the specified element is positive @@ -347,8 +405,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // assert.Positive(t, 1) // assert.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -356,8 +417,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { // assert.Negative(t, -1) // assert.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs) + return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) 
} func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { diff --git a/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go new file mode 100644 index 00000000..da867903 --- /dev/null +++ b/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go @@ -0,0 +1,16 @@ +//go:build go1.17 +// +build go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_legacy.go + +package assert + +import "reflect" + +// Wrapper around reflect.Value.CanConvert, for compatibility +// reasons. +func canConvert(value reflect.Value, to reflect.Type) bool { + return value.CanConvert(to) +} diff --git a/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go new file mode 100644 index 00000000..1701af2a --- /dev/null +++ b/mantle/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go @@ -0,0 +1,16 @@ +//go:build !go1.17 +// +build !go1.17 + +// TODO: once support for Go 1.16 is dropped, this file can be +// merged/removed with assertion_compare_go1.17_test.go and +// assertion_compare_can_convert.go + +package assert + +import "reflect" + +// Older versions of Go does not have the reflect.Value.CanConvert +// method. 
+func canConvert(value reflect.Value, to reflect.Type) bool { + return false +} diff --git a/mantle/vendor/github.com/stretchr/testify/assert/assertion_format.go b/mantle/vendor/github.com/stretchr/testify/assert/assertion_format.go index 4dfd1229..7880b8f9 100644 --- a/mantle/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/mantle/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...) } +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...) +} + // ErrorIsf asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { @@ -724,6 +736,16 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } +// WithinRangef asserts that a time is within a time range (inclusive). +// +// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return WithinRange(t, actual, start, end, append([]interface{}{msg}, args...)...) 
+} + // YAMLEqf asserts that two YAML strings are equivalent. func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/mantle/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/mantle/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 25337a6f..339515b8 100644 --- a/mantle/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/mantle/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args .. return ErrorAsf(a.t, err, target, msg, args...) } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContains(err, expectedErrorSubString) +func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContains(a.t, theError, contains, msgAndArgs...) +} + +// ErrorContainsf asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted") +func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return ErrorContainsf(a.t, theError, contains, msg, args...) +} + // ErrorIs asserts that at least one of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool { @@ -1437,6 +1461,26 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) 
} +// WithinRange asserts that a time is within a time range (inclusive). +// +// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRange(a.t, actual, start, end, msgAndArgs...) +} + +// WithinRangef asserts that a time is within a time range (inclusive). +// +// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return WithinRangef(a.t, actual, start, end, msg, args...) +} + // YAMLEq asserts that two YAML strings are equivalent. func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/mantle/vendor/github.com/stretchr/testify/assert/assertion_order.go b/mantle/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1c3b4718..75944878 100644 --- a/mantle/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/mantle/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
} // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) + return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) 
} diff --git a/mantle/vendor/github.com/stretchr/testify/assert/assertions.go b/mantle/vendor/github.com/stretchr/testify/assert/assertions.go index bcac4401..fa1245b1 100644 --- a/mantle/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/mantle/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "os" + "path/filepath" "reflect" "regexp" "runtime" @@ -144,7 +145,8 @@ func CallerInfo() []string { if len(parts) > 1 { dir := parts[len(parts)-2] if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + path, _ := filepath.Abs(file) + callers = append(callers, fmt.Sprintf("%s:%d", path, line)) } } @@ -563,16 +565,17 @@ func isEmpty(object interface{}) bool { switch objValue.Kind() { // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value + // array types are empty when they match their zero-initialized state default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -718,10 +721,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (false, false) if impossible. // return (true, false) if element was not found. // return (true, true) if element was found. 
-func includeElement(list interface{}, element interface{}) (ok, found bool) { +func containsElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() + listType := reflect.TypeOf(list) + if listType == nil { + return false, false + } + listKind := listType.Kind() defer func() { if e := recover(); e != nil { ok = false @@ -764,7 +771,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) } @@ -787,7 +794,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) h.Helper() } - ok, found := includeElement(s, contains) + ok, found := containsElement(s, contains) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } @@ -811,7 +818,6 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -821,17 +827,35 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) 
} + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + } + } + + return true + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -852,10 +876,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) h.Helper() } if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) + return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) defer func() { if e := recover(); e != nil { ok = false @@ -865,17 +888,35 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) listKind := reflect.TypeOf(list).Kind() subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice { + if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } - if subsetKind != reflect.Array && subsetKind != reflect.Slice { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) 
} + subsetValue := reflect.ValueOf(subset) + if subsetKind == reflect.Map && listKind == reflect.Map { + listValue := reflect.ValueOf(list) + subsetKeys := subsetValue.MapKeys() + + for i := 0; i < len(subsetKeys); i++ { + subsetKey := subsetKeys[i] + subsetElement := subsetValue.MapIndex(subsetKey).Interface() + listElement := listValue.MapIndex(subsetKey).Interface() + + if !ObjectsAreEqual(subsetElement, listElement) { + return true + } + } + + return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) + } + for i := 0; i < subsetValue.Len(); i++ { element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) + ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) } @@ -1000,27 +1041,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() +func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) { + didPanic = true + defer func() { + message = recover() + if didPanic { + stack = string(debug.Stack()) + } }() - return didPanic, message, stack + // call the target function + f() + didPanic = false + return } // Panics asserts that the code inside the specified PanicTestFunc panics. @@ -1111,6 +1146,27 @@ func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, return true } +// WithinRange asserts that a time is within a time range (inclusive). 
+// +// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if end.Before(start) { + return Fail(t, "Start should be before end", msgAndArgs...) + } + + if actual.Before(start) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is before the range", actual, start, end), msgAndArgs...) + } else if actual.After(end) { + return Fail(t, fmt.Sprintf("Time %v expected to be in time range %v to %v, but is after the range", actual, start, end), msgAndArgs...) + } + + return true +} + func toFloat(x interface{}) (float64, bool) { var xf float64 xok := true @@ -1161,11 +1217,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs bf, bok := toFloat(actual) if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) + return Fail(t, "Parameters must be numerical", msgAndArgs...) + } + + if math.IsNaN(af) && math.IsNaN(bf) { + return true } if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) + return Fail(t, "Expected must not be NaN", msgAndArgs...) } if math.IsNaN(bf) { @@ -1188,7 +1248,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) 
} actualSlice := reflect.ValueOf(actual) @@ -1250,8 +1310,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) + bf, bok := toFloat(actual) + if !aok || !bok { + return 0, fmt.Errorf("Parameters must be numerical") + } + if math.IsNaN(af) && math.IsNaN(bf) { + return 0, nil } if math.IsNaN(af) { return 0, errors.New("expected value must not be NaN") @@ -1259,10 +1323,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } if math.IsNaN(bf) { return 0, errors.New("actual value must not be NaN") } @@ -1298,7 +1358,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) + return Fail(t, "Parameters must be slice", msgAndArgs...) } actualSlice := reflect.ValueOf(actual) @@ -1375,6 +1435,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte return true } +// ErrorContains asserts that a function returned an error (i.e. not `nil`) +// and that the error contains the specified substring. +// +// actualObj, err := SomeFunction() +// assert.ErrorContains(t, err, expectedErrorSubString) +func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !Error(t, theError, msgAndArgs...) 
{ + return false + } + + actual := theError.Error() + if !strings.Contains(actual, contains) { + return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...) + } + + return true +} + // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { @@ -1588,12 +1669,17 @@ func diff(expected interface{}, actual interface{}) string { } var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { + + switch et { + case reflect.TypeOf(""): e = reflect.ValueOf(expected).String() a = reflect.ValueOf(actual).String() + case reflect.TypeOf(time.Time{}): + e = spewConfigStringerEnabled.Sdump(expected) + a = spewConfigStringerEnabled.Sdump(actual) + default: + e = spewConfig.Sdump(expected) + a = spewConfig.Sdump(actual) } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1625,6 +1711,14 @@ var spewConfig = spew.ConfigState{ MaxDepth: 10, } +var spewConfigStringerEnabled = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, + MaxDepth: 10, +} + type tHelper interface { Helper() } diff --git a/mantle/vendor/github.com/ulikunitz/xz/.gitignore b/mantle/vendor/github.com/ulikunitz/xz/.gitignore deleted file mode 100644 index e3c2fc2f..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# .gitignore - -TODO.html -README.html - -lzma/writer.txt -lzma/reader.txt - -cmd/gxz/gxz -cmd/xb/xb - -# test executables -*.test - -# profile files -*.out - -# vim swap file -.*.swp - -# executables on windows -*.exe - -# default compression test file -enwik8* diff --git a/mantle/vendor/github.com/ulikunitz/xz/LICENSE b/mantle/vendor/github.com/ulikunitz/xz/LICENSE deleted file mode 100644 index 009b8487..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/LICENSE +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2014-2021 Ulrich 
Kunitz -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* My name, Ulrich Kunitz, may not be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/mantle/vendor/github.com/ulikunitz/xz/README.md b/mantle/vendor/github.com/ulikunitz/xz/README.md deleted file mode 100644 index 0a2dc828..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Package xz - -This Go language package supports the reading and writing of xz -compressed streams. It includes also a gxz command for compressing and -decompressing data. The package is completely written in Go and doesn't -have any dependency on any C code. - -The package is currently under development. 
There might be bugs and APIs -are not considered stable. At this time the package cannot compete with -the xz tool regarding compression speed and size. The algorithms there -have been developed over a long time and are highly optimized. However -there are a number of improvements planned and I'm very optimistic about -parallel compression and decompression. Stay tuned! - -## Using the API - -The following example program shows how to use the API. - -```go -package main - -import ( - "bytes" - "io" - "log" - "os" - - "github.com/ulikunitz/xz" -) - -func main() { - const text = "The quick brown fox jumps over the lazy dog.\n" - var buf bytes.Buffer - // compress text - w, err := xz.NewWriter(&buf) - if err != nil { - log.Fatalf("xz.NewWriter error %s", err) - } - if _, err := io.WriteString(w, text); err != nil { - log.Fatalf("WriteString error %s", err) - } - if err := w.Close(); err != nil { - log.Fatalf("w.Close error %s", err) - } - // decompress buffer and write output to stdout - r, err := xz.NewReader(&buf) - if err != nil { - log.Fatalf("NewReader error %s", err) - } - if _, err = io.Copy(os.Stdout, r); err != nil { - log.Fatalf("io.Copy error %s", err) - } -} -``` - -## Using the gxz compression tool - -The package includes a gxz command line utility for compression and -decompression. - -Use following command for installation: - - $ go get github.com/ulikunitz/xz/cmd/gxz - -To test it call the following command. - - $ gxz bigfile - -After some time a much smaller file bigfile.xz will replace bigfile. -To decompress it use the following command. - - $ gxz -d bigfile.xz - diff --git a/mantle/vendor/github.com/ulikunitz/xz/SECURITY.md b/mantle/vendor/github.com/ulikunitz/xz/SECURITY.md deleted file mode 100644 index 5f7ec01b..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/SECURITY.md +++ /dev/null @@ -1,10 +0,0 @@ -# Security Policy - -## Supported Versions - -Currently the last minor version v0.5.x is supported. 
- -## Reporting a Vulnerability - -Report a vulnerability by creating a Github issue at -. Expect a response in a week. diff --git a/mantle/vendor/github.com/ulikunitz/xz/TODO.md b/mantle/vendor/github.com/ulikunitz/xz/TODO.md deleted file mode 100644 index 594e0c7f..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/TODO.md +++ /dev/null @@ -1,363 +0,0 @@ -# TODO list - -## Release v0.5.x - -1. Support check flag in gxz command. - -## Release v0.6 - -1. Review encoder and check for lzma improvements under xz. -2. Fix binary tree matcher. -3. Compare compression ratio with xz tool using comparable parameters and optimize parameters -4. rename operation action and make it a simple type of size 8 -5. make maxMatches, wordSize parameters -6. stop searching after a certain length is found (parameter sweetLen) - -## Release v0.7 - -1. Optimize code -2. Do statistical analysis to get linear presets. -3. Test sync.Pool compatability for xz and lzma Writer and Reader -4. Fuzz optimized code. - -## Release v0.8 - -1. Support parallel go routines for writing and reading xz files. -2. Support a ReaderAt interface for xz files with small block sizes. -3. Improve compatibility between gxz and xz -4. Provide manual page for gxz - -## Release v0.9 - -1. Improve documentation -2. Fuzz again - -## Release v1.0 - -1. Full functioning gxz -2. Add godoc URL to README.md (godoc.org) -3. Resolve all issues. -4. Define release candidates. -5. Public announcement. - -## Package lzma - -### v0.6 - -* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including - * simple scan at the dictionary head for the same byte - * use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer) - -## Optimizations - -* There may be a lot of false sharing in lzma. State; check whether this can be improved by reorganizing the internal structure of it. - -* Check whether batching encoding and decoding improves speed. 
- -### DAG optimizations - -* Use full buffer to create minimal bit-length above range encoder. -* Might be too slow (see v0.4) - -### Different match finders - -* hashes with 2, 3 characters additional to 4 characters -* binary trees with 2-7 characters (uint64 as key, use uint32 as - - pointers into a an array) - -* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers - - into an array with bit-steeling for the colors) - -## Release Procedure - -* execute goch -l for all packages; probably with lower param like 0.5. -* check orthography with gospell -* Write release notes in doc/relnotes. -* Update README.md -* xb copyright . in xz directory to ensure all new files have Copyright header -* `VERSION= go generate github.com/ulikunitz/xz/...` to update version files -* Execute test for Linux/amd64, Linux/x86 and Windows/amd64. -* Update TODO.md - write short log entry -* `git checkout master && git merge dev` -* `git tag -a ` -* `git push` - -## Log - -### 2021-02-02 - -Mituo Heijo has fuzzed xz and found a bug in the function readIndexBody. The -function allocated a slice of records immediately after reading the value -without further checks. Since the number has been too large the make function -did panic. The fix is to check the number against the expected number of records -before allocating the records. - -### 2020-12-17 - -Release v0.5.9 fixes warnings, a typo and adds SECURITY.md. - -One fix is interesting. - -```go -const ( - a byte = 0x1 - b = 0x2 -) -``` - -The constants a and b don't have the same type. Correct is - -```go -const ( - a byte = 0x1 - b byte = 0x2 -) -``` - -### 2020-08-19 - -Release v0.5.8 fixes issue -[issue #35](https://github.com/ulikunitz/xz/issues/35). - -### 2020-02-24 - -Release v0.5.7 supports the check-ID None and fixes -[issue #27](https://github.com/ulikunitz/xz/issues/27). - -### 2019-02-20 - -Release v0.5.6 supports the go.mod file. - -### 2018-10-28 - -Release v0.5.5 fixes issues #19 observing ErrLimit outputs. 
- -### 2017-06-05 - -Release v0.5.4 fixes issues #15 of another problem with the padding size -check for the xz block header. I removed the check completely. - -### 2017-02-15 - -Release v0.5.3 fixes issue #12 regarding the decompression of an empty -XZ stream. Many thanks to Tomasz Kłak, who reported the issue. - -### 2016-12-02 - -Release v0.5.2 became necessary to allow the decoding of xz files with -4-byte padding in the block header. Many thanks to Greg, who reported -the issue. - -### 2016-07-23 - -Release v0.5.1 became necessary to fix problems with 32-bit platforms. -Many thanks to Bruno Brigas, who reported the issue. - -### 2016-07-04 - -Release v0.5 provides improvements to the compressor and provides support for -the decompression of xz files with multiple xz streams. - -### 2016-01-31 - -Another compression rate increase by checking the byte at length of the -best match first, before checking the whole prefix. This makes the -compressor even faster. We have now a large time budget to beat the -compression ratio of the xz tool. For enwik8 we have now over 40 seconds -to reduce the compressed file size for another 7 MiB. - -### 2016-01-30 - -I simplified the encoder. Speed and compression rate increased -dramatically. A high compression rate affects also the decompression -speed. The approach with the buffer and optimizing for operation -compression rate has not been successful. Going for the maximum length -appears to be the best approach. - -### 2016-01-28 - -The release v0.4 is ready. It provides a working xz implementation, -which is rather slow, but works and is interoperable with the xz tool. -It is an important milestone. - -### 2016-01-10 - -I have the first working implementation of an xz reader and writer. I'm -happy about reaching this milestone. - -### 2015-12-02 - -I'm now ready to implement xz because, I have a working LZMA2 -implementation. 
I decided today that v0.4 will use the slow encoder -using the operations buffer to be able to go back, if I intend to do so. - -### 2015-10-21 - -I have restarted the work on the library. While trying to implement -LZMA2, I discovered that I need to resimplify the encoder and decoder -functions. The option approach is too complicated. Using a limited byte -writer and not caring for written bytes at all and not to try to handle -uncompressed data simplifies the LZMA encoder and decoder much. -Processing uncompressed data and handling limits is a feature of the -LZMA2 format not of LZMA. - -I learned an interesting method from the LZO format. If the last copy is -too far away they are moving the head one 2 bytes and not 1 byte to -reduce processing times. - -### 2015-08-26 - -I have now reimplemented the lzma package. The code is reasonably fast, -but can still be optimized. The next step is to implement LZMA2 and then -xz. - -### 2015-07-05 - -Created release v0.3. The version is the foundation for a full xz -implementation that is the target of v0.4. - -### 2015-06-11 - -The gflag package has been developed because I couldn't use flag and -pflag for a fully compatible support of gzip's and lzma's options. It -seems to work now quite nicely. - -### 2015-06-05 - -The overflow issue was interesting to research, however Henry S. Warren -Jr. Hacker's Delight book was very helpful as usual and had the issue -explained perfectly. Fefe's information on his website was based on the -C FAQ and quite bad, because it didn't address the issue of -MININT == -MININT. - -### 2015-06-04 - -It has been a productive day. I improved the interface of lzma. Reader -and lzma. Writer and fixed the error handling. - -### 2015-06-01 - -By computing the bit length of the LZMA operations I was able to -improve the greedy algorithm implementation. By using an 8 MByte buffer -the compression rate was not as good as for xz but already better then -gzip default. 
- -Compression is currently slow, but this is something we will be able to -improve over time. - -### 2015-05-26 - -Checked the license of ogier/pflag. The binary lzmago binary should -include the license terms for the pflag library. - -I added the endorsement clause as used by Google for the Go sources the -LICENSE file. - -### 2015-05-22 - -The package lzb contains now the basic implementation for creating or -reading LZMA byte streams. It allows the support for the implementation -of the DAG-shortest-path algorithm for the compression function. - -### 2015-04-23 - -Completed yesterday the lzbase classes. I'm a little bit concerned that -using the components may require too much code, but on the other hand -there is a lot of flexibility. - -### 2015-04-22 - -Implemented Reader and Writer during the Bayern game against Porto. The -second half gave me enough time. - -### 2015-04-21 - -While showering today morning I discovered that the design for OpEncoder -and OpDecoder doesn't work, because encoding/decoding might depend on -the current status of the dictionary. This is not exactly the right way -to start the day. - -Therefore we need to keep the Reader and Writer design. This time around -we simplify it by ignoring size limits. These can be added by wrappers -around the Reader and Writer interfaces. The Parameters type isn't -needed anymore. - -However I will implement a ReaderState and WriterState type to use -static typing to ensure the right State object is combined with the -right lzbase. Reader and lzbase. Writer. - -As a start I have implemented ReaderState and WriterState to ensure -that the state for reading is only used by readers and WriterState only -used by Writers. - -### 2015-04-20 - -Today I implemented the OpDecoder and tested OpEncoder and OpDecoder. - -### 2015-04-08 - -Came up with a new simplified design for lzbase. I implemented already -the type State that replaces OpCodec. 
- -### 2015-04-06 - -The new lzma package is now fully usable and lzmago is using it now. The -old lzma package has been completely removed. - -### 2015-04-05 - -Implemented lzma. Reader and tested it. - -### 2015-04-04 - -Implemented baseReader by adapting code form lzma. Reader. - -### 2015-04-03 - -The opCodec has been copied yesterday to lzma2. opCodec has a high -number of dependencies on other files in lzma2. Therefore I had to copy -almost all files from lzma. - -### 2015-03-31 - -Removed only a TODO item. - -However in Francesco Campoy's presentation "Go for Javaneros -(Javaïstes?)" is the the idea that using an embedded field E, all the -methods of E will be defined on T. If E is an interface T satisfies E. - - - -I have never used this, but it seems to be a cool idea. - -### 2015-03-30 - -Finished the type writerDict and wrote a simple test. - -### 2015-03-25 - -I started to implement the writerDict. - -### 2015-03-24 - -After thinking long about the LZMA2 code and several false starts, I -have now a plan to create a self-sufficient lzma2 package that supports -the classic LZMA format as well as LZMA2. The core idea is to support a -baseReader and baseWriter type that support the basic LZMA stream -without any headers. Both types must support the reuse of dictionaries -and the opCodec. - -### 2015-01-10 - -1. Implemented simple lzmago tool -2. Tested tool against large 4.4G file - * compression worked correctly; tested decompression with lzma - * decompression hits a full buffer condition -3. Fixed a bug in the compressor and wrote a test for it -4. 
Executed full cycle for 4.4 GB file; performance can be improved ;-) - -### 2015-01-11 - -* Release v0.2 because of the working LZMA encoder and decoder diff --git a/mantle/vendor/github.com/ulikunitz/xz/bits.go b/mantle/vendor/github.com/ulikunitz/xz/bits.go deleted file mode 100644 index e48450c2..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/bits.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xz - -import ( - "errors" - "io" -) - -// putUint32LE puts the little-endian representation of x into the first -// four bytes of p. -func putUint32LE(p []byte, x uint32) { - p[0] = byte(x) - p[1] = byte(x >> 8) - p[2] = byte(x >> 16) - p[3] = byte(x >> 24) -} - -// putUint64LE puts the little-endian representation of x into the first -// eight bytes of p. -func putUint64LE(p []byte, x uint64) { - p[0] = byte(x) - p[1] = byte(x >> 8) - p[2] = byte(x >> 16) - p[3] = byte(x >> 24) - p[4] = byte(x >> 32) - p[5] = byte(x >> 40) - p[6] = byte(x >> 48) - p[7] = byte(x >> 56) -} - -// uint32LE converts a little endian representation to an uint32 value. -func uint32LE(p []byte) uint32 { - return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | - uint32(p[3])<<24 -} - -// putUvarint puts a uvarint representation of x into the byte slice. -func putUvarint(p []byte, x uint64) int { - i := 0 - for x >= 0x80 { - p[i] = byte(x) | 0x80 - x >>= 7 - i++ - } - p[i] = byte(x) - return i + 1 -} - -// errOverflow indicates an overflow of the 64-bit unsigned integer. -var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer") - -// readUvarint reads a uvarint from the given byte reader. 
-func readUvarint(r io.ByteReader) (x uint64, n int, err error) { - const maxUvarintLen = 10 - - var s uint - i := 0 - for { - b, err := r.ReadByte() - if err != nil { - return x, i, err - } - i++ - if i > maxUvarintLen { - return x, i, errOverflowU64 - } - if b < 0x80 { - if i == maxUvarintLen && b > 1 { - return x, i, errOverflowU64 - } - return x | uint64(b)< 0 { - k = 4 - k - } - return k -} - -/*** Header ***/ - -// headerMagic stores the magic bytes for the header -var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00} - -// HeaderLen provides the length of the xz file header. -const HeaderLen = 12 - -// Constants for the checksum methods supported by xz. -const ( - None byte = 0x0 - CRC32 byte = 0x1 - CRC64 byte = 0x4 - SHA256 byte = 0xa -) - -// errInvalidFlags indicates that flags are invalid. -var errInvalidFlags = errors.New("xz: invalid flags") - -// verifyFlags returns the error errInvalidFlags if the value is -// invalid. -func verifyFlags(flags byte) error { - switch flags { - case None, CRC32, CRC64, SHA256: - return nil - default: - return errInvalidFlags - } -} - -// flagstrings maps flag values to strings. -var flagstrings = map[byte]string{ - None: "None", - CRC32: "CRC-32", - CRC64: "CRC-64", - SHA256: "SHA-256", -} - -// flagString returns the string representation for the given flags. -func flagString(flags byte) string { - s, ok := flagstrings[flags] - if !ok { - return "invalid" - } - return s -} - -// newHashFunc returns a function that creates hash instances for the -// hash method encoded in flags. -func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { - switch flags { - case None: - newHash = newNoneHash - case CRC32: - newHash = newCRC32 - case CRC64: - newHash = newCRC64 - case SHA256: - newHash = sha256.New - default: - err = errInvalidFlags - } - return -} - -// header provides the actual content of the xz file header: the flags. -type header struct { - flags byte -} - -// Errors returned by readHeader. 
-var errHeaderMagic = errors.New("xz: invalid header magic bytes") - -// ValidHeader checks whether data is a correct xz file header. The -// length of data must be HeaderLen. -func ValidHeader(data []byte) bool { - var h header - err := h.UnmarshalBinary(data) - return err == nil -} - -// String returns a string representation of the flags. -func (h header) String() string { - return flagString(h.flags) -} - -// UnmarshalBinary reads header from the provided data slice. -func (h *header) UnmarshalBinary(data []byte) error { - // header length - if len(data) != HeaderLen { - return errors.New("xz: wrong file header length") - } - - // magic header - if !bytes.Equal(headerMagic, data[:6]) { - return errHeaderMagic - } - - // checksum - crc := crc32.NewIEEE() - crc.Write(data[6:8]) - if uint32LE(data[8:]) != crc.Sum32() { - return errors.New("xz: invalid checksum for file header") - } - - // stream flags - if data[6] != 0 { - return errInvalidFlags - } - flags := data[7] - if err := verifyFlags(flags); err != nil { - return err - } - - h.flags = flags - return nil -} - -// MarshalBinary generates the xz file header. -func (h *header) MarshalBinary() (data []byte, err error) { - if err = verifyFlags(h.flags); err != nil { - return nil, err - } - - data = make([]byte, 12) - copy(data, headerMagic) - data[7] = h.flags - - crc := crc32.NewIEEE() - crc.Write(data[6:8]) - putUint32LE(data[8:], crc.Sum32()) - - return data, nil -} - -/*** Footer ***/ - -// footerLen defines the length of the footer. -const footerLen = 12 - -// footerMagic contains the footer magic bytes. -var footerMagic = []byte{'Y', 'Z'} - -// footer represents the content of the xz file footer. -type footer struct { - indexSize int64 - flags byte -} - -// String prints a string representation of the footer structure. -func (f footer) String() string { - return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) -} - -// Minimum and maximum for the size of the index (backward size). 
-const ( - minIndexSize = 4 - maxIndexSize = (1 << 32) * 4 -) - -// MarshalBinary converts footer values into an xz file footer. Note -// that the footer value is checked for correctness. -func (f *footer) MarshalBinary() (data []byte, err error) { - if err = verifyFlags(f.flags); err != nil { - return nil, err - } - if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { - return nil, errors.New("xz: index size out of range") - } - if f.indexSize%4 != 0 { - return nil, errors.New( - "xz: index size not aligned to four bytes") - } - - data = make([]byte, footerLen) - - // backward size (index size) - s := (f.indexSize / 4) - 1 - putUint32LE(data[4:], uint32(s)) - // flags - data[9] = f.flags - // footer magic - copy(data[10:], footerMagic) - - // CRC-32 - crc := crc32.NewIEEE() - crc.Write(data[4:10]) - putUint32LE(data, crc.Sum32()) - - return data, nil -} - -// UnmarshalBinary sets the footer value by unmarshalling an xz file -// footer. -func (f *footer) UnmarshalBinary(data []byte) error { - if len(data) != footerLen { - return errors.New("xz: wrong footer length") - } - - // magic bytes - if !bytes.Equal(data[10:], footerMagic) { - return errors.New("xz: footer magic invalid") - } - - // CRC-32 - crc := crc32.NewIEEE() - crc.Write(data[4:10]) - if uint32LE(data) != crc.Sum32() { - return errors.New("xz: footer checksum error") - } - - var g footer - // backward size (index size) - g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 - - // flags - if data[8] != 0 { - return errInvalidFlags - } - g.flags = data[9] - if err := verifyFlags(g.flags); err != nil { - return err - } - - *f = g - return nil -} - -/*** Block Header ***/ - -// blockHeader represents the content of an xz block header. -type blockHeader struct { - compressedSize int64 - uncompressedSize int64 - filters []filter -} - -// String converts the block header into a string. 
-func (h blockHeader) String() string { - var buf bytes.Buffer - first := true - if h.compressedSize >= 0 { - fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) - first = false - } - if h.uncompressedSize >= 0 { - if !first { - buf.WriteString(" ") - } - fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) - first = false - } - for _, f := range h.filters { - if !first { - buf.WriteString(" ") - } - fmt.Fprintf(&buf, "filter %s", f) - first = false - } - return buf.String() -} - -// Masks for the block flags. -const ( - filterCountMask = 0x03 - compressedSizePresent = 0x40 - uncompressedSizePresent = 0x80 - reservedBlockFlags = 0x3C -) - -// errIndexIndicator signals that an index indicator (0x00) has been found -// instead of an expected block header indicator. -var errIndexIndicator = errors.New("xz: found index indicator") - -// readBlockHeader reads the block header. -func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { - var buf bytes.Buffer - buf.Grow(20) - - // block header size - z, err := io.CopyN(&buf, r, 1) - n = int(z) - if err != nil { - return nil, n, err - } - s := buf.Bytes()[0] - if s == 0 { - return nil, n, errIndexIndicator - } - - // read complete header - headerLen := (int(s) + 1) * 4 - buf.Grow(headerLen - 1) - z, err = io.CopyN(&buf, r, int64(headerLen-1)) - n += int(z) - if err != nil { - return nil, n, err - } - - // unmarshal block header - h = new(blockHeader) - if err = h.UnmarshalBinary(buf.Bytes()); err != nil { - return nil, n, err - } - - return h, n, nil -} - -// readSizeInBlockHeader reads the uncompressed or compressed size -// fields in the block header. The present value informs the function -// whether the respective field is actually present in the header. 
-func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { - if !present { - return -1, nil - } - x, _, err := readUvarint(r) - if err != nil { - return 0, err - } - if x >= 1<<63 { - return 0, errors.New("xz: size overflow in block header") - } - return int64(x), nil -} - -// UnmarshalBinary unmarshals the block header. -func (h *blockHeader) UnmarshalBinary(data []byte) error { - // Check header length - s := data[0] - if data[0] == 0 { - return errIndexIndicator - } - headerLen := (int(s) + 1) * 4 - if len(data) != headerLen { - return fmt.Errorf("xz: data length %d; want %d", len(data), - headerLen) - } - n := headerLen - 4 - - // Check CRC-32 - crc := crc32.NewIEEE() - crc.Write(data[:n]) - if crc.Sum32() != uint32LE(data[n:]) { - return errors.New("xz: checksum error for block header") - } - - // Block header flags - flags := data[1] - if flags&reservedBlockFlags != 0 { - return errors.New("xz: reserved block header flags set") - } - - r := bytes.NewReader(data[2:n]) - - // Compressed size - var err error - h.compressedSize, err = readSizeInBlockHeader( - r, flags&compressedSizePresent != 0) - if err != nil { - return err - } - - // Uncompressed size - h.uncompressedSize, err = readSizeInBlockHeader( - r, flags&uncompressedSizePresent != 0) - if err != nil { - return err - } - - h.filters, err = readFilters(r, int(flags&filterCountMask)+1) - if err != nil { - return err - } - - // Check padding - // Since headerLen is a multiple of 4 we don't need to check - // alignment. - k := r.Len() - // The standard spec says that the padding should have not more - // than 3 bytes. However we found paddings of 4 or 5 in the - // wild. See https://github.com/ulikunitz/xz/pull/11 and - // https://github.com/ulikunitz/xz/issues/15 - // - // The only reasonable approach seems to be to ignore the - // padding size. We still check that all padding bytes are zero. 
- if !allZeros(data[n-k : n]) { - return errPadding - } - return nil -} - -// MarshalBinary marshals the binary header. -func (h *blockHeader) MarshalBinary() (data []byte, err error) { - if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { - return nil, errors.New("xz: filter count wrong") - } - for i, f := range h.filters { - if i < len(h.filters)-1 { - if f.id() == lzmaFilterID { - return nil, errors.New( - "xz: LZMA2 filter is not the last") - } - } else { - // last filter - if f.id() != lzmaFilterID { - return nil, errors.New("xz: " + - "last filter must be the LZMA2 filter") - } - } - } - - var buf bytes.Buffer - // header size must set at the end - buf.WriteByte(0) - - // flags - flags := byte(len(h.filters) - 1) - if h.compressedSize >= 0 { - flags |= compressedSizePresent - } - if h.uncompressedSize >= 0 { - flags |= uncompressedSizePresent - } - buf.WriteByte(flags) - - p := make([]byte, 10) - if h.compressedSize >= 0 { - k := putUvarint(p, uint64(h.compressedSize)) - buf.Write(p[:k]) - } - if h.uncompressedSize >= 0 { - k := putUvarint(p, uint64(h.uncompressedSize)) - buf.Write(p[:k]) - } - - for _, f := range h.filters { - fp, err := f.MarshalBinary() - if err != nil { - return nil, err - } - buf.Write(fp) - } - - // padding - for i := padLen(int64(buf.Len())); i > 0; i-- { - buf.WriteByte(0) - } - - // crc place holder - buf.Write(p[:4]) - - data = buf.Bytes() - if len(data)%4 != 0 { - panic("data length not aligned") - } - s := len(data)/4 - 1 - if !(1 < s && s <= 255) { - panic("wrong block header size") - } - data[0] = byte(s) - - crc := crc32.NewIEEE() - crc.Write(data[:len(data)-4]) - putUint32LE(data[len(data)-4:], crc.Sum32()) - - return data, nil -} - -// Constants used for marshalling and unmarshalling filters in the xz -// block header. -const ( - minFilters = 1 - maxFilters = 4 - minReservedID = 1 << 62 -) - -// filter represents a filter in the block header. 
-type filter interface { - id() uint64 - UnmarshalBinary(data []byte) error - MarshalBinary() (data []byte, err error) - reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) - writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) - // filter must be last filter - last() bool -} - -// readFilter reads a block filter from the block header. At this point -// in time only the LZMA2 filter is supported. -func readFilter(r io.Reader) (f filter, err error) { - br := lzma.ByteReader(r) - - // index - id, _, err := readUvarint(br) - if err != nil { - return nil, err - } - - var data []byte - switch id { - case lzmaFilterID: - data = make([]byte, lzmaFilterLen) - data[0] = lzmaFilterID - if _, err = io.ReadFull(r, data[1:]); err != nil { - return nil, err - } - f = new(lzmaFilter) - default: - if id >= minReservedID { - return nil, errors.New( - "xz: reserved filter id in block stream header") - } - return nil, errors.New("xz: invalid filter id") - } - if err = f.UnmarshalBinary(data); err != nil { - return nil, err - } - return f, err -} - -// readFilters reads count filters. At this point in time only the count -// 1 is supported. -func readFilters(r io.Reader, count int) (filters []filter, err error) { - if count != 1 { - return nil, errors.New("xz: unsupported filter count") - } - f, err := readFilter(r) - if err != nil { - return nil, err - } - return []filter{f}, err -} - -/*** Index ***/ - -// record describes a block in the xz file index. -type record struct { - unpaddedSize int64 - uncompressedSize int64 -} - -// readRecord reads an index record. 
-func readRecord(r io.ByteReader) (rec record, n int, err error) { - u, k, err := readUvarint(r) - n += k - if err != nil { - return rec, n, err - } - rec.unpaddedSize = int64(u) - if rec.unpaddedSize < 0 { - return rec, n, errors.New("xz: unpadded size negative") - } - - u, k, err = readUvarint(r) - n += k - if err != nil { - return rec, n, err - } - rec.uncompressedSize = int64(u) - if rec.uncompressedSize < 0 { - return rec, n, errors.New("xz: uncompressed size negative") - } - - return rec, n, nil -} - -// MarshalBinary converts an index record in its binary encoding. -func (rec *record) MarshalBinary() (data []byte, err error) { - // maximum length of a uvarint is 10 - p := make([]byte, 20) - n := putUvarint(p, uint64(rec.unpaddedSize)) - n += putUvarint(p[n:], uint64(rec.uncompressedSize)) - return p[:n], nil -} - -// writeIndex writes the index, a sequence of records. -func writeIndex(w io.Writer, index []record) (n int64, err error) { - crc := crc32.NewIEEE() - mw := io.MultiWriter(w, crc) - - // index indicator - k, err := mw.Write([]byte{0}) - n += int64(k) - if err != nil { - return n, err - } - - // number of records - p := make([]byte, 10) - k = putUvarint(p, uint64(len(index))) - k, err = mw.Write(p[:k]) - n += int64(k) - if err != nil { - return n, err - } - - // list of records - for _, rec := range index { - p, err := rec.MarshalBinary() - if err != nil { - return n, err - } - k, err = mw.Write(p) - n += int64(k) - if err != nil { - return n, err - } - } - - // index padding - k, err = mw.Write(make([]byte, padLen(int64(n)))) - n += int64(k) - if err != nil { - return n, err - } - - // crc32 checksum - putUint32LE(p, crc.Sum32()) - k, err = w.Write(p[:4]) - n += int64(k) - - return n, err -} - -// readIndexBody reads the index from the reader. It assumes that the -// index indicator has already been read. 
-func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) { - crc := crc32.NewIEEE() - // index indicator - crc.Write([]byte{0}) - - br := lzma.ByteReader(io.TeeReader(r, crc)) - - // number of records - u, k, err := readUvarint(br) - n += int64(k) - if err != nil { - return nil, n, err - } - recLen := int(u) - if recLen < 0 || uint64(recLen) != u { - return nil, n, errors.New("xz: record number overflow") - } - if recLen != expectedRecordLen { - return nil, n, fmt.Errorf( - "xz: index length is %d; want %d", - recLen, expectedRecordLen) - } - - // list of records - records = make([]record, recLen) - for i := range records { - records[i], k, err = readRecord(br) - n += int64(k) - if err != nil { - return nil, n, err - } - } - - p := make([]byte, padLen(int64(n+1)), 4) - k, err = io.ReadFull(br.(io.Reader), p) - n += int64(k) - if err != nil { - return nil, n, err - } - if !allZeros(p) { - return nil, n, errors.New("xz: non-zero byte in index padding") - } - - // crc32 - s := crc.Sum32() - p = p[:4] - k, err = io.ReadFull(br.(io.Reader), p) - n += int64(k) - if err != nil { - return records, n, err - } - if uint32LE(p) != s { - return nil, n, errors.New("xz: wrong checksum for index") - } - - return records, n, nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/mantle/vendor/github.com/ulikunitz/xz/fox-check-none.xz deleted file mode 100644 index 46043f7dc89b610dc3badb9db3426620c4c97462..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 96 zcmexsUKJ6=z`*cd=%ynRgCe6CkX@qxbTK1?PDnLRM*R tL9s%9S!$6&2~avGv8qxbB|lw{3#g5Ofzej?!NQIFY(?{`7{LOOQ2>-O93KDx diff --git a/mantle/vendor/github.com/ulikunitz/xz/fox.xz b/mantle/vendor/github.com/ulikunitz/xz/fox.xz deleted file mode 100644 index 4b820bd5a16e83fe5db4fb315639a4337f862483..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 104 
zcmexsUKJ6=z`*kC+7>q^21Q0O1_p)_{ill=8FWH2QWXkIGn2Cwl8W-n^AytZD-^Oy za|?dFO$zmVVdxt0+m!4eq- E0K@hlng9R* diff --git a/mantle/vendor/github.com/ulikunitz/xz/go.mod b/mantle/vendor/github.com/ulikunitz/xz/go.mod deleted file mode 100644 index 330b675b..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/ulikunitz/xz - -go 1.12 diff --git a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/mantle/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go deleted file mode 100644 index f723cf25..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hash - -// CyclicPoly provides a cyclic polynomial rolling hash. -type CyclicPoly struct { - h uint64 - p []uint64 - i int -} - -// ror rotates the unsigned 64-bit integer to right. The argument s must be -// less than 64. -func ror(x uint64, s uint) uint64 { - return (x >> s) | (x << (64 - s)) -} - -// NewCyclicPoly creates a new instance of the CyclicPoly structure. The -// argument n gives the number of bytes for which a hash will be executed. -// This number must be positive; the method panics if this isn't the case. -func NewCyclicPoly(n int) *CyclicPoly { - if n < 1 { - panic("argument n must be positive") - } - return &CyclicPoly{p: make([]uint64, 0, n)} -} - -// Len returns the length of the byte sequence for which a hash is generated. -func (r *CyclicPoly) Len() int { - return cap(r.p) -} - -// RollByte hashes the next byte and returns a hash value. The complete becomes -// available after at least Len() bytes have been hashed. 
-func (r *CyclicPoly) RollByte(x byte) uint64 { - y := hash[x] - if len(r.p) < cap(r.p) { - r.h = ror(r.h, 1) ^ y - r.p = append(r.p, y) - } else { - r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) - r.h = ror(r.h, 1) ^ y - r.p[r.i] = y - r.i = (r.i + 1) % cap(r.p) - } - return r.h -} - -// Stores the hash for the individual bytes. -var hash = [256]uint64{ - 0x2e4fc3f904065142, 0xc790984cfbc99527, - 0x879f95eb8c62f187, 0x3b61be86b5021ef2, - 0x65a896a04196f0a5, 0xc5b307b80470b59e, - 0xd3bff376a70df14b, 0xc332f04f0b3f1701, - 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, - 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, - 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, - 0x71aadeded184f21e, 0xd73426fccda23b2d, - 0x29773fb5fb9600b5, 0xce410261cd32981a, - 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, - 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, - 0xa5f10b3910482cea, 0x2945d59be02dfaad, - 0x06ee334ff70571b5, 0xbabf9d8070f44380, - 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, - 0x26183cb9f7b1664c, 0xea71dac7da068f21, - 0xea92eca5bd1d0bb7, 0x415595862defcd75, - 0x248a386023c60648, 0x9cf021ab284b3c8a, - 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, - 0x73e799d139dc6975, 0x7b15ae312486363c, - 0xb70e5454a2239c80, 0x208e3fb31d3b2263, - 0x01f563cabb930f44, 0x2ac4533d2a3240d8, - 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, - 0x213c227271c20122, 0x09fe8a9a0a03d07a, - 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, - 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, - 0x70adb010543bea12, 0xcdae938f7ea6f579, - 0x3f3d870208672f4d, 0x8e6ccbce9d349536, - 0xe4c0871a389095ae, 0xf5f2a49152bca080, - 0x9a43f9b97269934e, 0xc17b3753cb6f475c, - 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, - 0xa06d5a011912a550, 0x5537ed19537ad1df, - 0xa32fe713d611449d, 0x2a1d05b47c3b579f, - 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, - 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, - 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, - 0x0b63d5d801708420, 0x8f227ca8f37ffaec, - 0x0256278670887c24, 0x107e14877dbf540b, - 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, - 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, - 
0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, - 0xd99264421147eb03, 0x535a2d6d38aefcfe, - 0x6ba8b4454a916237, 0xfa39366eaae4719c, - 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, - 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, - 0xd61c2503fe639144, 0x30ce625441eb92d3, - 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, - 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, - 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, - 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, - 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, - 0x7808e902b3857d0b, 0x171c9c4ea4607972, - 0x58d66274850146df, 0x42b311c10d3981d1, - 0x647fa8c621c41a4c, 0xf472771c66ddfedc, - 0x338d27e3f847b46b, 0x6402ce3da97545ce, - 0x5162db616fc38638, 0x9c83be97bc22a50e, - 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, - 0x9454614eb0f81c45, 0x395fb6e742ed39b6, - 0x77dd9179d06037bf, 0xc478d0fee4d2656d, - 0x35d9d6cb772007af, 0x83a56e92c883f0f6, - 0x27937453250c00a1, 0x27bd6ebc3a46a97d, - 0x9f543bf784342d51, 0xd158f38c48b0ed52, - 0x8dd8537c045f66b4, 0x846a57230226f6d5, - 0x6b13939e0c4e7cdf, 0xfca25425d8176758, - 0x92e5fc6cd52788e6, 0x9992e13d7a739170, - 0x518246f7a199e8ea, 0xf104c2a71b9979c7, - 0x86b3ffaabea4768f, 0x6388061cf3e351ad, - 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, - 0x1d759846499e148d, 0x4c0ff015e5f96ef4, - 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, - 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, - 0x337523aabbe6cf8d, 0x646bb14001d42b12, - 0xc178729d138adc74, 0xf900ef4491f24086, - 0xee1a90d334bb5ac4, 0x9755c92247301a50, - 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, - 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, - 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, - 0x80118d4ae46bd210, 0x58ab61a522843733, - 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, - 0x765669e0e5e8157b, 0xa5347830737132f0, - 0x3ba485a69f01510c, 0x0b247d7b957a01c3, - 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, - 0x8b535ed3829b2b14, 0xee41d0cad65d232c, - 0xe6a99ed97a6a982f, 0x65ac6194c202003d, - 0x692accf3a70573eb, 0xcc3c02c3e200d5af, - 0x0d419e8b325914a3, 0x320f160f42c25e40, - 0x00710d647a51fe7a, 0x3c947692330aed60, - 0x9288aa280d355a7a, 
0xa1806a9b791d1696, - 0x5d60e38496763da1, 0x6c69e22e613fd0f4, - 0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba, - 0x460c17992cbaece1, 0xf7822c5444d3297f, - 0x344a9790c69b74aa, 0xb80a42e6cae09dce, - 0x1b1361eaf2b1e757, 0xd84c1e758e236f01, - 0x88e0b7be347627cc, 0x45246009b7a99490, - 0x8011c6dd3fe50472, 0xc341d682bffb99d7, - 0x2511be93808e2d15, 0xd5bc13d7fd739840, - 0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157, - 0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0, - 0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc, - 0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e, - 0xa559cce0d9199aac, 0xde39d47ef3723380, - 0xe5b69d848ce42e35, 0xefa24296f8e79f52, - 0x70190b59db9a5afc, 0x26f166cdb211e7bf, - 0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017, - 0xb9059b05e9420d90, 0x2f0da855c9388754, - 0x611d5e9ab77949cc, 0x2912038ac01163f4, - 0x0231df50402b2fba, 0x45660fc4f3245f58, - 0xb91cc97c7c8dac50, 0xb72d2aafe4953427, - 0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2, - 0x1310e1c1a48d21c3, 0xad48a7810cdd8544, - 0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de, - 0xe70cfc8fe1ee9626, 0xef4711b0d8dda442, - 0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93, - 0x9b37db9d0335a39c, 0x494b6f870f5cfebc, - 0x6d1b3c1149dda943, 0x372c943a518c1093, - 0xad27af45e77c09c4, 0x3b6f92b646044604, - 0xac2917909f5fcf4f, 0x2069a60e977e5557, - 0x353a469e71014de5, 0x24be356281f55c15, - 0x2b6d710ba8e9adea, 0x404ad1751c749c29, - 0xed7311bf23d7f185, 0xba4f6976b4acc43e, - 0x32d7198d2bc39000, 0xee667019014d6e01, - 0x494ef3e128d14c83, 0x1f95a152baecd6be, - 0x201648dff1f483a5, 0x68c28550c8384af6, - 0x5fc834a6824a7f48, 0x7cd06cb7365eaf28, - 0xd82bbd95e9b30909, 0x234f0d1694c53f6d, - 0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e, - 0xf8f6b97f5585080a, 0x74236084be57b95b, - 0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b, - 0x4378ffe93e1528c5, 0x94ca92a17118e2d2, -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/mantle/vendor/github.com/ulikunitz/xz/internal/hash/doc.go deleted file mode 100644 index cc60a6b5..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/doc.go +++ 
/dev/null @@ -1,14 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package hash provides rolling hashes. - -Rolling hashes have to be used for maintaining the positions of n-byte -sequences in the dictionary buffer. - -The package provides currently the Rabin-Karp rolling hash and a Cyclic -Polynomial hash. Both support the Hashes method to be used with an interface. -*/ -package hash diff --git a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/mantle/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go deleted file mode 100644 index c6432913..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hash - -// A is the default constant for Robin-Karp rolling hash. This is a random -// prime. -const A = 0x97b548add41d5da1 - -// RabinKarp supports the computation of a rolling hash. -type RabinKarp struct { - A uint64 - // a^n - aOldest uint64 - h uint64 - p []byte - i int -} - -// NewRabinKarp creates a new RabinKarp value. The argument n defines the -// length of the byte sequence to be hashed. The default constant will will be -// used. -func NewRabinKarp(n int) *RabinKarp { - return NewRabinKarpConst(n, A) -} - -// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the -// length of the byte sequence to be hashed. The argument a provides the -// constant used to compute the hash. -func NewRabinKarpConst(n int, a uint64) *RabinKarp { - if n <= 0 { - panic("number of bytes n must be positive") - } - aOldest := uint64(1) - // There are faster methods. For the small n required by the LZMA - // compressor O(n) is sufficient. 
- for i := 0; i < n; i++ { - aOldest *= a - } - return &RabinKarp{ - A: a, aOldest: aOldest, - p: make([]byte, 0, n), - } -} - -// Len returns the length of the byte sequence. -func (r *RabinKarp) Len() int { - return cap(r.p) -} - -// RollByte computes the hash after x has been added. -func (r *RabinKarp) RollByte(x byte) uint64 { - if len(r.p) < cap(r.p) { - r.h += uint64(x) - r.h *= r.A - r.p = append(r.p, x) - } else { - r.h -= uint64(r.p[r.i]) * r.aOldest - r.h += uint64(x) - r.h *= r.A - r.p[r.i] = x - r.i = (r.i + 1) % cap(r.p) - } - return r.h -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/mantle/vendor/github.com/ulikunitz/xz/internal/hash/roller.go deleted file mode 100644 index f1de88b4..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/internal/hash/roller.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hash - -// Roller provides an interface for rolling hashes. The hash value will become -// valid after hash has been called Len times. -type Roller interface { - Len() int - RollByte(x byte) uint64 -} - -// Hashes computes all hash values for the array p. Note that the state of the -// roller is changed. -func Hashes(r Roller, p []byte) []uint64 { - n := r.Len() - if len(p) < n { - return nil - } - h := make([]uint64, len(p)-n+1) - for i := 0; i < n-1; i++ { - r.RollByte(p[i]) - } - for i := range h { - h[i] = r.RollByte(p[i+n-1]) - } - return h -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/mantle/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go deleted file mode 100644 index 6c20c77b..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xlog provides a simple logging package that allows to disable -// certain message categories. It defines a type, Logger, with multiple -// methods for formatting output. The package has also a predefined -// 'standard' Logger accessible through helper function Print[f|ln], -// Fatal[f|ln], Panic[f|ln], Warn[f|ln], Print[f|ln] and Debug[f|ln] -// that are easier to use then creating a Logger manually. That logger -// writes to standard error and prints the date and time of each logged -// message, which can be configured using the function SetFlags. -// -// The Fatal functions call os.Exit(1) after the message is output -// unless not suppressed by the flags. The Panic functions call panic -// after the writing the log message unless suppressed. -package xlog - -import ( - "fmt" - "io" - "os" - "runtime" - "sync" - "time" -) - -// The flags define what information is prefixed to each log entry -// generated by the Logger. The Lno* versions allow the suppression of -// specific output. The bits are or'ed together to control what will be -// printed. There is no control over the order of the items printed and -// the format. 
The full format is: -// -// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message -// -const ( - Ldate = 1 << iota // the date: 2009-01-23 - Ltime // the time: 01:23:23 - Lmicroseconds // microsecond resolution: 01:23:23.123123 - Llongfile // full file name and line number: /a/b/c/d.go:23 - Lshortfile // final file name element and line number: d.go:23 - Lnopanic // suppresses output from Panic[f|ln] but not the panic call - Lnofatal // suppresses output from Fatal[f|ln] but not the exit - Lnowarn // suppresses output from Warn[f|ln] - Lnoprint // suppresses output from Print[f|ln] - Lnodebug // suppresses output from Debug[f|ln] - // initial values for the standard logger - Lstdflags = Ldate | Ltime | Lnodebug -) - -// A Logger represents an active logging object that generates lines of -// output to an io.Writer. Each logging operation if not suppressed -// makes a single call to the Writer's Write method. A Logger can be -// used simultaneously from multiple goroutines; it guarantees to -// serialize access to the Writer. -type Logger struct { - mu sync.Mutex // ensures atomic writes; and protects the following - // fields - prefix string // prefix to write at beginning of each line - flag int // properties - out io.Writer // destination for output - buf []byte // for accumulating text to write -} - -// New creates a new Logger. The out argument sets the destination to -// which the log output will be written. The prefix appears at the -// beginning of each log line. The flag argument defines the logging -// properties. -func New(out io.Writer, prefix string, flag int) *Logger { - return &Logger{out: out, prefix: prefix, flag: flag} -} - -// std is the standard logger used by the package scope functions. -var std = New(os.Stderr, "", Lstdflags) - -// itoa converts the integer to ASCII. A negative widths will avoid -// zero-padding. The function supports only non-negative integers. 
-func itoa(buf *[]byte, i int, wid int) { - var u = uint(i) - if u == 0 && wid <= 1 { - *buf = append(*buf, '0') - return - } - var b [32]byte - bp := len(b) - for ; u > 0 || wid > 0; u /= 10 { - bp-- - wid-- - b[bp] = byte(u%10) + '0' - } - *buf = append(*buf, b[bp:]...) -} - -// formatHeader puts the header into the buf field of the buffer. -func (l *Logger) formatHeader(t time.Time, file string, line int) { - l.buf = append(l.buf, l.prefix...) - if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { - if l.flag&Ldate != 0 { - year, month, day := t.Date() - itoa(&l.buf, year, 4) - l.buf = append(l.buf, '-') - itoa(&l.buf, int(month), 2) - l.buf = append(l.buf, '-') - itoa(&l.buf, day, 2) - l.buf = append(l.buf, ' ') - } - if l.flag&(Ltime|Lmicroseconds) != 0 { - hour, min, sec := t.Clock() - itoa(&l.buf, hour, 2) - l.buf = append(l.buf, ':') - itoa(&l.buf, min, 2) - l.buf = append(l.buf, ':') - itoa(&l.buf, sec, 2) - if l.flag&Lmicroseconds != 0 { - l.buf = append(l.buf, '.') - itoa(&l.buf, t.Nanosecond()/1e3, 6) - } - l.buf = append(l.buf, ' ') - } - } - if l.flag&(Lshortfile|Llongfile) != 0 { - if l.flag&Lshortfile != 0 { - short := file - for i := len(file) - 1; i > 0; i-- { - if file[i] == '/' { - short = file[i+1:] - break - } - } - file = short - } - l.buf = append(l.buf, file...) - l.buf = append(l.buf, ':') - itoa(&l.buf, line, -1) - l.buf = append(l.buf, ": "...) - } -} - -func (l *Logger) output(calldepth int, now time.Time, s string) error { - var file string - var line int - if l.flag&(Lshortfile|Llongfile) != 0 { - l.mu.Unlock() - var ok bool - _, file, line, ok = runtime.Caller(calldepth) - if !ok { - file = "???" - line = 0 - } - l.mu.Lock() - } - l.buf = l.buf[:0] - l.formatHeader(now, file, line) - l.buf = append(l.buf, s...) - if len(s) == 0 || s[len(s)-1] != '\n' { - l.buf = append(l.buf, '\n') - } - _, err := l.out.Write(l.buf) - return err -} - -// Output writes the string s with the header controlled by the flags to -// the l.out writer. 
A newline will be appended if s doesn't end in a -// newline. Calldepth is used to recover the PC, although all current -// calls of Output use the call depth 2. Access to the function is serialized. -func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { - now := time.Now() - l.mu.Lock() - defer l.mu.Unlock() - if l.flag&noflag != 0 { - return nil - } - s := fmt.Sprint(v...) - return l.output(calldepth+1, now, s) -} - -// Outputf works like output but formats the output like Printf. -func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { - now := time.Now() - l.mu.Lock() - defer l.mu.Unlock() - if l.flag&noflag != 0 { - return nil - } - s := fmt.Sprintf(format, v...) - return l.output(calldepth+1, now, s) -} - -// Outputln works like output but formats the output like Println. -func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { - now := time.Now() - l.mu.Lock() - defer l.mu.Unlock() - if l.flag&noflag != 0 { - return nil - } - s := fmt.Sprintln(v...) - return l.output(calldepth+1, now, s) -} - -// Panic prints the message like Print and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func (l *Logger) Panic(v ...interface{}) { - l.Output(2, Lnopanic, v...) - s := fmt.Sprint(v...) - panic(s) -} - -// Panic prints the message like Print and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func Panic(v ...interface{}) { - std.Output(2, Lnopanic, v...) - s := fmt.Sprint(v...) - panic(s) -} - -// Panicf prints the message like Printf and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func (l *Logger) Panicf(format string, v ...interface{}) { - l.Outputf(2, Lnopanic, format, v...) - s := fmt.Sprintf(format, v...) - panic(s) -} - -// Panicf prints the message like Printf and calls panic. The printing -// might be suppressed by the flag Lnopanic. 
-func Panicf(format string, v ...interface{}) { - std.Outputf(2, Lnopanic, format, v...) - s := fmt.Sprintf(format, v...) - panic(s) -} - -// Panicln prints the message like Println and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func (l *Logger) Panicln(v ...interface{}) { - l.Outputln(2, Lnopanic, v...) - s := fmt.Sprintln(v...) - panic(s) -} - -// Panicln prints the message like Println and calls panic. The printing -// might be suppressed by the flag Lnopanic. -func Panicln(v ...interface{}) { - std.Outputln(2, Lnopanic, v...) - s := fmt.Sprintln(v...) - panic(s) -} - -// Fatal prints the message like Print and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func (l *Logger) Fatal(v ...interface{}) { - l.Output(2, Lnofatal, v...) - os.Exit(1) -} - -// Fatal prints the message like Print and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func Fatal(v ...interface{}) { - std.Output(2, Lnofatal, v...) - os.Exit(1) -} - -// Fatalf prints the message like Printf and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func (l *Logger) Fatalf(format string, v ...interface{}) { - l.Outputf(2, Lnofatal, format, v...) - os.Exit(1) -} - -// Fatalf prints the message like Printf and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func Fatalf(format string, v ...interface{}) { - std.Outputf(2, Lnofatal, format, v...) - os.Exit(1) -} - -// Fatalln prints the message like Println and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func (l *Logger) Fatalln(format string, v ...interface{}) { - l.Outputln(2, Lnofatal, v...) - os.Exit(1) -} - -// Fatalln prints the message like Println and calls os.Exit(1). The -// printing might be suppressed by the flag Lnofatal. -func Fatalln(format string, v ...interface{}) { - std.Outputln(2, Lnofatal, v...) - os.Exit(1) -} - -// Warn prints the message like Print. 
The printing might be suppressed -// by the flag Lnowarn. -func (l *Logger) Warn(v ...interface{}) { - l.Output(2, Lnowarn, v...) -} - -// Warn prints the message like Print. The printing might be suppressed -// by the flag Lnowarn. -func Warn(v ...interface{}) { - std.Output(2, Lnowarn, v...) -} - -// Warnf prints the message like Printf. The printing might be suppressed -// by the flag Lnowarn. -func (l *Logger) Warnf(format string, v ...interface{}) { - l.Outputf(2, Lnowarn, format, v...) -} - -// Warnf prints the message like Printf. The printing might be suppressed -// by the flag Lnowarn. -func Warnf(format string, v ...interface{}) { - std.Outputf(2, Lnowarn, format, v...) -} - -// Warnln prints the message like Println. The printing might be suppressed -// by the flag Lnowarn. -func (l *Logger) Warnln(v ...interface{}) { - l.Outputln(2, Lnowarn, v...) -} - -// Warnln prints the message like Println. The printing might be suppressed -// by the flag Lnowarn. -func Warnln(v ...interface{}) { - std.Outputln(2, Lnowarn, v...) -} - -// Print prints the message like fmt.Print. The printing might be suppressed -// by the flag Lnoprint. -func (l *Logger) Print(v ...interface{}) { - l.Output(2, Lnoprint, v...) -} - -// Print prints the message like fmt.Print. The printing might be suppressed -// by the flag Lnoprint. -func Print(v ...interface{}) { - std.Output(2, Lnoprint, v...) -} - -// Printf prints the message like fmt.Printf. The printing might be suppressed -// by the flag Lnoprint. -func (l *Logger) Printf(format string, v ...interface{}) { - l.Outputf(2, Lnoprint, format, v...) -} - -// Printf prints the message like fmt.Printf. The printing might be suppressed -// by the flag Lnoprint. -func Printf(format string, v ...interface{}) { - std.Outputf(2, Lnoprint, format, v...) -} - -// Println prints the message like fmt.Println. The printing might be -// suppressed by the flag Lnoprint. 
-func (l *Logger) Println(v ...interface{}) { - l.Outputln(2, Lnoprint, v...) -} - -// Println prints the message like fmt.Println. The printing might be -// suppressed by the flag Lnoprint. -func Println(v ...interface{}) { - std.Outputln(2, Lnoprint, v...) -} - -// Debug prints the message like Print. The printing might be suppressed -// by the flag Lnodebug. -func (l *Logger) Debug(v ...interface{}) { - l.Output(2, Lnodebug, v...) -} - -// Debug prints the message like Print. The printing might be suppressed -// by the flag Lnodebug. -func Debug(v ...interface{}) { - std.Output(2, Lnodebug, v...) -} - -// Debugf prints the message like Printf. The printing might be suppressed -// by the flag Lnodebug. -func (l *Logger) Debugf(format string, v ...interface{}) { - l.Outputf(2, Lnodebug, format, v...) -} - -// Debugf prints the message like Printf. The printing might be suppressed -// by the flag Lnodebug. -func Debugf(format string, v ...interface{}) { - std.Outputf(2, Lnodebug, format, v...) -} - -// Debugln prints the message like Println. The printing might be suppressed -// by the flag Lnodebug. -func (l *Logger) Debugln(v ...interface{}) { - l.Outputln(2, Lnodebug, v...) -} - -// Debugln prints the message like Println. The printing might be suppressed -// by the flag Lnodebug. -func Debugln(v ...interface{}) { - std.Outputln(2, Lnodebug, v...) -} - -// Flags returns the current flags used by the logger. -func (l *Logger) Flags() int { - l.mu.Lock() - defer l.mu.Unlock() - return l.flag -} - -// Flags returns the current flags used by the standard logger. -func Flags() int { - return std.Flags() -} - -// SetFlags sets the flags of the logger. -func (l *Logger) SetFlags(flag int) { - l.mu.Lock() - defer l.mu.Unlock() - l.flag = flag -} - -// SetFlags sets the flags for the standard logger. -func SetFlags(flag int) { - std.SetFlags(flag) -} - -// Prefix returns the prefix used by the logger. 
-func (l *Logger) Prefix() string { - l.mu.Lock() - defer l.mu.Unlock() - return l.prefix -} - -// Prefix returns the prefix used by the standard logger of the package. -func Prefix() string { - return std.Prefix() -} - -// SetPrefix sets the prefix for the logger. -func (l *Logger) SetPrefix(prefix string) { - l.mu.Lock() - defer l.mu.Unlock() - l.prefix = prefix -} - -// SetPrefix sets the prefix of the standard logger of the package. -func SetPrefix(prefix string) { - std.SetPrefix(prefix) -} - -// SetOutput sets the output of the logger. -func (l *Logger) SetOutput(w io.Writer) { - l.mu.Lock() - defer l.mu.Unlock() - l.out = w -} - -// SetOutput sets the output for the standard logger of the package. -func SetOutput(w io.Writer) { - std.SetOutput(w) -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/bintree.go deleted file mode 100644 index 2a7bd19e..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "unicode" -) - -// node represents a node in the binary tree. -type node struct { - // x is the search value - x uint32 - // p parent node - p uint32 - // l left child - l uint32 - // r right child - r uint32 -} - -// wordLen is the number of bytes represented by the v field of a node. -const wordLen = 4 - -// binTree supports the identification of the next operation based on a -// binary tree. -// -// Nodes will be identified by their index into the ring buffer. -type binTree struct { - dict *encoderDict - // ring buffer of nodes - node []node - // absolute offset of the entry for the next node. Position 4 - // byte larger. 
- hoff int64 - // front position in the node ring buffer - front uint32 - // index of the root node - root uint32 - // current x value - x uint32 - // preallocated array - data []byte -} - -// null represents the nonexistent index. We can't use zero because it -// would always exist or we would need to decrease the index for each -// reference. -const null uint32 = 1<<32 - 1 - -// newBinTree initializes the binTree structure. The capacity defines -// the size of the buffer and defines the maximum distance for which -// matches will be found. -func newBinTree(capacity int) (t *binTree, err error) { - if capacity < 1 { - return nil, errors.New( - "newBinTree: capacity must be larger than zero") - } - if int64(capacity) >= int64(null) { - return nil, errors.New( - "newBinTree: capacity must less 2^{32}-1") - } - t = &binTree{ - node: make([]node, capacity), - hoff: -int64(wordLen), - root: null, - data: make([]byte, maxMatchLen), - } - return t, nil -} - -func (t *binTree) SetDict(d *encoderDict) { t.dict = d } - -// WriteByte writes a single byte into the binary tree. -func (t *binTree) WriteByte(c byte) error { - t.x = (t.x << 8) | uint32(c) - t.hoff++ - if t.hoff < 0 { - return nil - } - v := t.front - if int64(v) < t.hoff { - // We are overwriting old nodes stored in the tree. - t.remove(v) - } - t.node[v].x = t.x - t.add(v) - t.front++ - if int64(t.front) >= int64(len(t.node)) { - t.front = 0 - } - return nil -} - -// Writes writes a sequence of bytes into the binTree structure. -func (t *binTree) Write(p []byte) (n int, err error) { - for _, c := range p { - t.WriteByte(c) - } - return len(p), nil -} - -// add puts the node v into the tree. The node must not be part of the -// tree before. -func (t *binTree) add(v uint32) { - vn := &t.node[v] - // Set left and right to null indices. - vn.l, vn.r = null, null - // If the binary tree is empty make v the root. 
- if t.root == null { - t.root = v - vn.p = null - return - } - x := vn.x - p := t.root - // Search for the right leave link and add the new node. - for { - pn := &t.node[p] - if x <= pn.x { - if pn.l == null { - pn.l = v - vn.p = p - return - } - p = pn.l - } else { - if pn.r == null { - pn.r = v - vn.p = p - return - } - p = pn.r - } - } -} - -// parent returns the parent node index of v and the pointer to v value -// in the parent. -func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { - if t.root == v { - return null, &t.root - } - p = t.node[v].p - if t.node[p].l == v { - ptr = &t.node[p].l - } else { - ptr = &t.node[p].r - } - return -} - -// Remove node v. -func (t *binTree) remove(v uint32) { - vn := &t.node[v] - p, ptr := t.parent(v) - l, r := vn.l, vn.r - if l == null { - // Move the right child up. - *ptr = r - if r != null { - t.node[r].p = p - } - return - } - if r == null { - // Move the left child up. - *ptr = l - t.node[l].p = p - return - } - - // Search the in-order predecessor u. - un := &t.node[l] - ur := un.r - if ur == null { - // In order predecessor is l. Move it up. - un.r = r - t.node[r].p = l - un.p = p - *ptr = l - return - } - var u uint32 - for { - // Look for the max value in the tree where l is root. - u = ur - ur = t.node[u].r - if ur == null { - break - } - } - // replace u with ul - un = &t.node[u] - ul := un.l - up := un.p - t.node[up].r = ul - if ul != null { - t.node[ul].p = up - } - - // replace v by u - un.l, un.r = l, r - t.node[l].p = u - t.node[r].p = u - *ptr = u - un.p = p -} - -// search looks for the node that have the value x or for the nodes that -// brace it. The node highest in the tree with the value x will be -// returned. All other nodes with the same value live in left subtree of -// the returned node. 
-func (t *binTree) search(v uint32, x uint32) (a, b uint32) { - a, b = null, null - if v == null { - return - } - for { - vn := &t.node[v] - if x <= vn.x { - if x == vn.x { - return v, v - } - b = v - if vn.l == null { - return - } - v = vn.l - } else { - a = v - if vn.r == null { - return - } - v = vn.r - } - } -} - -// max returns the node with maximum value in the subtree with v as -// root. -func (t *binTree) max(v uint32) uint32 { - if v == null { - return null - } - for { - r := t.node[v].r - if r == null { - return v - } - v = r - } -} - -// min returns the node with the minimum value in the subtree with v as -// root. -func (t *binTree) min(v uint32) uint32 { - if v == null { - return null - } - for { - l := t.node[v].l - if l == null { - return v - } - v = l - } -} - -// pred returns the in-order predecessor of node v. -func (t *binTree) pred(v uint32) uint32 { - if v == null { - return null - } - u := t.max(t.node[v].l) - if u != null { - return u - } - for { - p := t.node[v].p - if p == null { - return null - } - if t.node[p].r == v { - return p - } - v = p - } -} - -// succ returns the in-order successor of node v. -func (t *binTree) succ(v uint32) uint32 { - if v == null { - return null - } - u := t.min(t.node[v].r) - if u != null { - return u - } - for { - p := t.node[v].p - if p == null { - return null - } - if t.node[p].l == v { - return p - } - v = p - } -} - -// xval converts the first four bytes of a into an 32-bit unsigned -// integer in big-endian order. -func xval(a []byte) uint32 { - var x uint32 - switch len(a) { - default: - x |= uint32(a[3]) - fallthrough - case 3: - x |= uint32(a[2]) << 8 - fallthrough - case 2: - x |= uint32(a[1]) << 16 - fallthrough - case 1: - x |= uint32(a[0]) << 24 - case 0: - } - return x -} - -// dumpX converts value x into a four-letter string. 
-func dumpX(x uint32) string { - a := make([]byte, 4) - for i := 0; i < 4; i++ { - c := byte(x >> uint((3-i)*8)) - if unicode.IsGraphic(rune(c)) { - a[i] = c - } else { - a[i] = '.' - } - } - return string(a) -} - -/* -// dumpNode writes a representation of the node v into the io.Writer. -func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { - if v == null { - return - } - - vn := &t.node[v] - - t.dumpNode(w, vn.r, indent+2) - - for i := 0; i < indent; i++ { - fmt.Fprint(w, " ") - } - if vn.p == null { - fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) - } else { - fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) - } - - t.dumpNode(w, vn.l, indent+2) -} - -// dump prints a representation of the binary tree into the writer. -func (t *binTree) dump(w io.Writer) error { - bw := bufio.NewWriter(w) - t.dumpNode(bw, t.root, 0) - return bw.Flush() -} -*/ - -func (t *binTree) distance(v uint32) int { - dist := int(t.front) - int(v) - if dist <= 0 { - dist += len(t.node) - } - return dist -} - -type matchParams struct { - rep [4]uint32 - // length when match will be accepted - nAccept int - // nodes to check - check int - // finish if length get shorter - stopShorter bool -} - -func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, -) (r match, checked int, accepted bool) { - buf := &t.dict.buf - for { - if checked >= p.check { - return m, checked, true - } - dist, ok := distIter() - if !ok { - return m, checked, false - } - checked++ - if m.n > 0 { - i := buf.rear - dist + m.n - 1 - if i < 0 { - i += len(buf.data) - } else if i >= len(buf.data) { - i -= len(buf.data) - } - if buf.data[i] != t.data[m.n-1] { - if p.stopShorter { - return m, checked, false - } - continue - } - } - n := buf.matchLen(dist, t.data) - switch n { - case 0: - if p.stopShorter { - return m, checked, false - } - continue - case 1: - if uint32(dist-minDistance) != p.rep[0] { - continue - } - } - if n < m.n || (n == m.n && int64(dist) >= 
m.distance) { - continue - } - m = match{int64(dist), n} - if n >= p.nAccept { - return m, checked, true - } - } -} - -func (t *binTree) NextOp(rep [4]uint32) operation { - // retrieve maxMatchLen data - n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) - if n == 0 { - panic("no data in buffer") - } - t.data = t.data[:n] - - var ( - m match - x, u, v uint32 - iterPred, iterSucc func() (int, bool) - ) - p := matchParams{ - rep: rep, - nAccept: maxMatchLen, - check: 32, - } - i := 4 - iterSmall := func() (dist int, ok bool) { - i-- - if i <= 0 { - return 0, false - } - return i, true - } - m, checked, accepted := t.match(m, iterSmall, p) - if accepted { - goto end - } - p.check -= checked - x = xval(t.data) - u, v = t.search(t.root, x) - if u == v && len(t.data) == 4 { - iter := func() (dist int, ok bool) { - if u == null { - return 0, false - } - dist = t.distance(u) - u, v = t.search(t.node[u].l, x) - if u != v { - u = null - } - return dist, true - } - m, _, _ = t.match(m, iter, p) - goto end - } - p.stopShorter = true - iterSucc = func() (dist int, ok bool) { - if v == null { - return 0, false - } - dist = t.distance(v) - v = t.succ(v) - return dist, true - } - m, checked, accepted = t.match(m, iterSucc, p) - if accepted { - goto end - } - p.check -= checked - iterPred = func() (dist int, ok bool) { - if u == null { - return 0, false - } - dist = t.distance(u) - u = t.pred(u) - return dist, true - } - m, _, _ = t.match(m, iterPred, p) -end: - if m.n == 0 { - return lit{t.data[0]} - } - return m -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/bitops.go deleted file mode 100644 index d2c07e8c..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package lzma - -/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ - -// ntz32Const is used by the functions NTZ and NLZ. -const ntz32Const = 0x04d7651f - -// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. -// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. -var ntz32Table = [32]int8{ - 0, 1, 2, 24, 3, 19, 6, 25, - 22, 4, 20, 10, 16, 7, 12, 26, - 31, 23, 18, 5, 21, 9, 15, 11, - 30, 17, 8, 14, 29, 13, 28, 27, -} - -/* -// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. -func ntz32(x uint32) int { - if x == 0 { - return 32 - } - x = (x & -x) * ntz32Const - return int(ntz32Table[x>>27]) -} -*/ - -// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. -func nlz32(x uint32) int { - // Smear left most bit to the right - x |= x >> 1 - x |= x >> 2 - x |= x >> 4 - x |= x >> 8 - x |= x >> 16 - // Use ntz mechanism to calculate nlz. - x++ - if x == 0 { - return 0 - } - x *= ntz32Const - return 32 - int(ntz32Table[x>>27]) -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/breader.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/breader.go deleted file mode 100644 index 939be884..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/breader.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" -) - -// breader provides the ReadByte function for a Reader. It doesn't read -// more data from the reader than absolutely necessary. -type breader struct { - io.Reader - // helper slice to save allocations - p []byte -} - -// ByteReader converts an io.Reader into an io.ByteReader. -func ByteReader(r io.Reader) io.ByteReader { - br, ok := r.(io.ByteReader) - if !ok { - return &breader{r, make([]byte, 1)} - } - return br -} - -// ReadByte read byte function. 
-func (r *breader) ReadByte() (c byte, err error) { - n, err := r.Reader.Read(r.p) - if n < 1 { - if err == nil { - err = errors.New("breader.ReadByte: no data") - } - return 0, err - } - return r.p[0], nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/buffer.go deleted file mode 100644 index 2761de5f..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/buffer.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" -) - -// buffer provides a circular buffer of bytes. If the front index equals -// the rear index the buffer is empty. As a consequence front cannot be -// equal rear for a full buffer. So a full buffer has a length that is -// one byte less the the length of the data slice. -type buffer struct { - data []byte - front int - rear int -} - -// newBuffer creates a buffer with the given size. -func newBuffer(size int) *buffer { - return &buffer{data: make([]byte, size+1)} -} - -// Cap returns the capacity of the buffer. -func (b *buffer) Cap() int { - return len(b.data) - 1 -} - -// Resets the buffer. The front and rear index are set to zero. -func (b *buffer) Reset() { - b.front = 0 - b.rear = 0 -} - -// Buffered returns the number of bytes buffered. -func (b *buffer) Buffered() int { - delta := b.front - b.rear - if delta < 0 { - delta += len(b.data) - } - return delta -} - -// Available returns the number of bytes available for writing. -func (b *buffer) Available() int { - delta := b.rear - 1 - b.front - if delta < 0 { - delta += len(b.data) - } - return delta -} - -// addIndex adds a non-negative integer to the index i and returns the -// resulting index. The function takes care of wrapping the index as -// well as potential overflow situations. 
-func (b *buffer) addIndex(i int, n int) int { - // subtraction of len(b.data) prevents overflow - i += n - len(b.data) - if i < 0 { - i += len(b.data) - } - return i -} - -// Read reads bytes from the buffer into p and returns the number of -// bytes read. The function never returns an error but might return less -// data than requested. -func (b *buffer) Read(p []byte) (n int, err error) { - n, err = b.Peek(p) - b.rear = b.addIndex(b.rear, n) - return n, err -} - -// Peek reads bytes from the buffer into p without changing the buffer. -// Peek will never return an error but might return less data than -// requested. -func (b *buffer) Peek(p []byte) (n int, err error) { - m := b.Buffered() - n = len(p) - if m < n { - n = m - p = p[:n] - } - k := copy(p, b.data[b.rear:]) - if k < n { - copy(p[k:], b.data) - } - return n, nil -} - -// Discard skips the n next bytes to read from the buffer, returning the -// bytes discarded. -// -// If Discards skips fewer than n bytes, it returns an error. -func (b *buffer) Discard(n int) (discarded int, err error) { - if n < 0 { - return 0, errors.New("buffer.Discard: negative argument") - } - m := b.Buffered() - if m < n { - n = m - err = errors.New( - "buffer.Discard: discarded less bytes then requested") - } - b.rear = b.addIndex(b.rear, n) - return n, err -} - -// ErrNoSpace indicates that there is insufficient space for the Write -// operation. -var ErrNoSpace = errors.New("insufficient space") - -// Write puts data into the buffer. If less bytes are written than -// requested ErrNoSpace is returned. -func (b *buffer) Write(p []byte) (n int, err error) { - m := b.Available() - n = len(p) - if m < n { - n = m - p = p[:m] - err = ErrNoSpace - } - k := copy(b.data[b.front:], p) - if k < n { - copy(b.data, p[k:]) - } - b.front = b.addIndex(b.front, n) - return n, err -} - -// WriteByte writes a single byte into the buffer. The error ErrNoSpace -// is returned if no single byte is available in the buffer for writing. 
-func (b *buffer) WriteByte(c byte) error { - if b.Available() < 1 { - return ErrNoSpace - } - b.data[b.front] = c - b.front = b.addIndex(b.front, 1) - return nil -} - -// prefixLen returns the length of the common prefix of a and b. -func prefixLen(a, b []byte) int { - if len(a) > len(b) { - a, b = b, a - } - for i, c := range a { - if b[i] != c { - return i - } - } - return len(a) -} - -// matchLen returns the length of the common prefix for the given -// distance from the rear and the byte slice p. -func (b *buffer) matchLen(distance int, p []byte) int { - var n int - i := b.rear - distance - if i < 0 { - if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { - return n - } - p = p[n:] - i = 0 - } - n += prefixLen(p, b.data[i:]) - return n -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go deleted file mode 100644 index 040874c1..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" -) - -// ErrLimit indicates that the limit of the LimitedByteWriter has been -// reached. -var ErrLimit = errors.New("limit reached") - -// LimitedByteWriter provides a byte writer that can be written until a -// limit is reached. The field N provides the number of remaining -// bytes. -type LimitedByteWriter struct { - BW io.ByteWriter - N int64 -} - -// WriteByte writes a single byte to the limited byte writer. It returns -// ErrLimit if the limit has been reached. If the byte is successfully -// written the field N of the LimitedByteWriter will be decremented by -// one. 
-func (l *LimitedByteWriter) WriteByte(c byte) error { - if l.N <= 0 { - return ErrLimit - } - if err := l.BW.WriteByte(c); err != nil { - return err - } - l.N-- - return nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/decoder.go deleted file mode 100644 index cbb943a0..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - "io" -) - -// decoder decodes a raw LZMA stream without any header. -type decoder struct { - // dictionary; the rear pointer of the buffer will be used for - // reading the data. - Dict *decoderDict - // decoder state - State *state - // range decoder - rd *rangeDecoder - // start stores the head value of the dictionary for the LZMA - // stream - start int64 - // size of uncompressed data - size int64 - // end-of-stream encountered - eos bool - // EOS marker found - eosMarker bool -} - -// newDecoder creates a new decoder instance. The parameter size provides -// the expected byte size of the decompressed data. If the size is -// unknown use a negative value. In that case the decoder will look for -// a terminating end-of-stream marker. -func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { - rd, err := newRangeDecoder(br) - if err != nil { - return nil, err - } - d = &decoder{ - State: state, - Dict: dict, - rd: rd, - size: size, - start: dict.pos(), - } - return d, nil -} - -// Reopen restarts the decoder with a new byte reader and a new size. Reopen -// resets the Decompressed counter to zero. 
-func (d *decoder) Reopen(br io.ByteReader, size int64) error { - var err error - if d.rd, err = newRangeDecoder(br); err != nil { - return err - } - d.start = d.Dict.pos() - d.size = size - d.eos = false - return nil -} - -// decodeLiteral decodes a single literal from the LZMA stream. -func (d *decoder) decodeLiteral() (op operation, err error) { - litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) - match := d.Dict.byteAt(int(d.State.rep[0]) + 1) - s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) - if err != nil { - return nil, err - } - return lit{s}, nil -} - -// errEOS indicates that an EOS marker has been found. -var errEOS = errors.New("EOS marker found") - -// readOp decodes the next operation from the compressed stream. It -// returns the operation. If an explicit end of stream marker is -// identified the eos error is returned. -func (d *decoder) readOp() (op operation, err error) { - // Value of the end of stream (EOS) marker - const eosDist = 1<<32 - 1 - - state, state2, posState := d.State.states(d.Dict.head) - - b, err := d.State.isMatch[state2].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - // literal - op, err := d.decodeLiteral() - if err != nil { - return nil, err - } - d.State.updateStateLiteral() - return op, nil - } - b, err = d.State.isRep[state].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - // simple match - d.State.rep[3], d.State.rep[2], d.State.rep[1] = - d.State.rep[2], d.State.rep[1], d.State.rep[0] - - d.State.updateStateMatch() - // The length decoder returns the length offset. - n, err := d.State.lenCodec.Decode(d.rd, posState) - if err != nil { - return nil, err - } - // The dist decoder returns the distance offset. The actual - // distance is 1 higher. 
- d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) - if err != nil { - return nil, err - } - if d.State.rep[0] == eosDist { - d.eosMarker = true - return nil, errEOS - } - op = match{n: int(n) + minMatchLen, - distance: int64(d.State.rep[0]) + minDistance} - return op, nil - } - b, err = d.State.isRepG0[state].Decode(d.rd) - if err != nil { - return nil, err - } - dist := d.State.rep[0] - if b == 0 { - // rep match 0 - b, err = d.State.isRepG0Long[state2].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - d.State.updateStateShortRep() - op = match{n: 1, distance: int64(dist) + minDistance} - return op, nil - } - } else { - b, err = d.State.isRepG1[state].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - dist = d.State.rep[1] - } else { - b, err = d.State.isRepG2[state].Decode(d.rd) - if err != nil { - return nil, err - } - if b == 0 { - dist = d.State.rep[2] - } else { - dist = d.State.rep[3] - d.State.rep[3] = d.State.rep[2] - } - d.State.rep[2] = d.State.rep[1] - } - d.State.rep[1] = d.State.rep[0] - d.State.rep[0] = dist - } - n, err := d.State.repLenCodec.Decode(d.rd, posState) - if err != nil { - return nil, err - } - d.State.updateStateRep() - op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} - return op, nil -} - -// apply takes the operation and transforms the decoder dictionary accordingly. -func (d *decoder) apply(op operation) error { - var err error - switch x := op.(type) { - case match: - err = d.Dict.writeMatch(x.distance, x.n) - case lit: - err = d.Dict.WriteByte(x.b) - default: - panic("op is neither a match nor a literal") - } - return err -} - -// decompress fills the dictionary unless no space for new data is -// available. If the end of the LZMA stream has been reached io.EOF will -// be returned. 
-func (d *decoder) decompress() error { - if d.eos { - return io.EOF - } - for d.Dict.Available() >= maxMatchLen { - op, err := d.readOp() - switch err { - case nil: - // break - case errEOS: - d.eos = true - if !d.rd.possiblyAtEnd() { - return errDataAfterEOS - } - if d.size >= 0 && d.size != d.Decompressed() { - return errSize - } - return io.EOF - case io.EOF: - d.eos = true - return io.ErrUnexpectedEOF - default: - return err - } - if err = d.apply(op); err != nil { - return err - } - if d.size >= 0 && d.Decompressed() >= d.size { - d.eos = true - if d.Decompressed() > d.size { - return errSize - } - if !d.rd.possiblyAtEnd() { - switch _, err = d.readOp(); err { - case nil: - return errSize - case io.EOF: - return io.ErrUnexpectedEOF - case errEOS: - break - default: - return err - } - } - return io.EOF - } - } - return nil -} - -// Errors that may be returned while decoding data. -var ( - errDataAfterEOS = errors.New("lzma: data after end of stream marker") - errSize = errors.New("lzma: wrong uncompressed data size") -) - -// Read reads data from the buffer. If no more data is available io.EOF is -// returned. -func (d *decoder) Read(p []byte) (n int, err error) { - var k int - for { - // Read of decoder dict never returns an error. - k, err = d.Dict.Read(p[n:]) - if err != nil { - panic(fmt.Errorf("dictionary read error %s", err)) - } - if k == 0 && d.eos { - return n, io.EOF - } - n += k - if n >= len(p) { - return n, nil - } - if err = d.decompress(); err != nil && err != io.EOF { - return n, err - } - } -} - -// Decompressed returns the number of bytes decompressed by the decoder. 
-func (d *decoder) Decompressed() int64 { - return d.Dict.pos() - d.start -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go deleted file mode 100644 index 8cd616ef..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" -) - -// decoderDict provides the dictionary for the decoder. The whole -// dictionary is used as reader buffer. -type decoderDict struct { - buf buffer - head int64 -} - -// newDecoderDict creates a new decoder dictionary. The whole dictionary -// will be used as reader buffer. -func newDecoderDict(dictCap int) (d *decoderDict, err error) { - // lower limit supports easy test cases - if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { - return nil, errors.New("lzma: dictCap out of range") - } - d = &decoderDict{buf: *newBuffer(dictCap)} - return d, nil -} - -// Reset clears the dictionary. The read buffer is not changed, so the -// buffered data can still be read. -func (d *decoderDict) Reset() { - d.head = 0 -} - -// WriteByte writes a single byte into the dictionary. It is used to -// write literals into the dictionary. -func (d *decoderDict) WriteByte(c byte) error { - if err := d.buf.WriteByte(c); err != nil { - return err - } - d.head++ - return nil -} - -// pos returns the position of the dictionary head. -func (d *decoderDict) pos() int64 { return d.head } - -// dictLen returns the actual length of the dictionary. -func (d *decoderDict) dictLen() int { - capacity := d.buf.Cap() - if d.head >= int64(capacity) { - return capacity - } - return int(d.head) -} - -// byteAt returns a byte stored in the dictionary. 
If the distance is -// non-positive or exceeds the current length of the dictionary the zero -// byte is returned. -func (d *decoderDict) byteAt(dist int) byte { - if !(0 < dist && dist <= d.dictLen()) { - return 0 - } - i := d.buf.front - dist - if i < 0 { - i += len(d.buf.data) - } - return d.buf.data[i] -} - -// writeMatch writes the match at the top of the dictionary. The given -// distance must point in the current dictionary and the length must not -// exceed the maximum length 273 supported in LZMA. -// -// The error value ErrNoSpace indicates that no space is available in -// the dictionary for writing. You need to read from the dictionary -// first. -func (d *decoderDict) writeMatch(dist int64, length int) error { - if !(0 < dist && dist <= int64(d.dictLen())) { - return errors.New("writeMatch: distance out of range") - } - if !(0 < length && length <= maxMatchLen) { - return errors.New("writeMatch: length out of range") - } - if length > d.buf.Available() { - return ErrNoSpace - } - d.head += int64(length) - - i := d.buf.front - int(dist) - if i < 0 { - i += len(d.buf.data) - } - for length > 0 { - var p []byte - if i >= d.buf.front { - p = d.buf.data[i:] - i = 0 - } else { - p = d.buf.data[i:d.buf.front] - i = d.buf.front - } - if len(p) > length { - p = p[:length] - } - if _, err := d.buf.Write(p); err != nil { - panic(fmt.Errorf("d.buf.Write returned error %s", err)) - } - length -= len(p) - } - return nil -} - -// Write writes the given bytes into the dictionary and advances the -// head. -func (d *decoderDict) Write(p []byte) (n int, err error) { - n, err = d.buf.Write(p) - d.head += int64(n) - return n, err -} - -// Available returns the number of available bytes for writing into the -// decoder dictionary. -func (d *decoderDict) Available() int { return d.buf.Available() } - -// Read reads data from the buffer contained in the decoder dictionary. 
-func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/directcodec.go deleted file mode 100644 index 20b256a9..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// directCodec allows the encoding and decoding of values with a fixed number -// of bits. The number of bits must be in the range [1,32]. -type directCodec byte - -// Bits returns the number of bits supported by this codec. -func (dc directCodec) Bits() int { - return int(dc) -} - -// Encode uses the range encoder to encode a value with the fixed number of -// bits. The most-significant bit is encoded first. -func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { - for i := int(dc) - 1; i >= 0; i-- { - if err := e.DirectEncodeBit(v >> uint(i)); err != nil { - return err - } - } - return nil -} - -// Decode uses the range decoder to decode a value with the given number of -// given bits. The most-significant bit is decoded first. -func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { - for i := int(dc) - 1; i >= 0; i-- { - x, err := d.DirectDecodeBit() - if err != nil { - return 0, err - } - v = (v << 1) | x - } - return v, nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/distcodec.go deleted file mode 100644 index 60ed9aef..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package lzma - -// Constants used by the distance codec. -const ( - // minimum supported distance - minDistance = 1 - // maximum supported distance, value is used for the eos marker. - maxDistance = 1 << 32 - // number of the supported len states - lenStates = 4 - // start for the position models - startPosModel = 4 - // first index with align bits support - endPosModel = 14 - // bits for the position slots - posSlotBits = 6 - // number of align bits - alignBits = 4 -) - -// distCodec provides encoding and decoding of distance values. -type distCodec struct { - posSlotCodecs [lenStates]treeCodec - posModel [endPosModel - startPosModel]treeReverseCodec - alignCodec treeReverseCodec -} - -// deepcopy initializes dc as deep copy of the source. -func (dc *distCodec) deepcopy(src *distCodec) { - if dc == src { - return - } - for i := range dc.posSlotCodecs { - dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) - } - for i := range dc.posModel { - dc.posModel[i].deepcopy(&src.posModel[i]) - } - dc.alignCodec.deepcopy(&src.alignCodec) -} - -// newDistCodec creates a new distance codec. -func (dc *distCodec) init() { - for i := range dc.posSlotCodecs { - dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) - } - for i := range dc.posModel { - posSlot := startPosModel + i - bits := (posSlot >> 1) - 1 - dc.posModel[i] = makeTreeReverseCodec(bits) - } - dc.alignCodec = makeTreeReverseCodec(alignBits) -} - -// lenState converts the value l to a supported lenState value. -func lenState(l uint32) uint32 { - if l >= lenStates { - l = lenStates - 1 - } - return l -} - -// Encode encodes the distance using the parameter l. Dist can have values from -// the full range of uint32 values. To get the distance offset the actual match -// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) -// indicates the end of the stream. 
-func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { - // Compute the posSlot using nlz32 - var posSlot uint32 - var bits uint32 - if dist < startPosModel { - posSlot = dist - } else { - bits = uint32(30 - nlz32(dist)) - posSlot = startPosModel - 2 + (bits << 1) - posSlot += (dist >> uint(bits)) & 1 - } - - if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { - return - } - - switch { - case posSlot < startPosModel: - return nil - case posSlot < endPosModel: - tc := &dc.posModel[posSlot-startPosModel] - return tc.Encode(dist, e) - } - dic := directCodec(bits - alignBits) - if err = dic.Encode(e, dist>>alignBits); err != nil { - return - } - return dc.alignCodec.Encode(dist, e) -} - -// Decode decodes the distance offset using the parameter l. The dist value -// 0xffffffff (eos) indicates the end of the stream. Add one to the distance -// offset to get the actual match distance. -func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { - posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) - if err != nil { - return - } - - // posSlot equals distance - if posSlot < startPosModel { - return posSlot, nil - } - - // posSlot uses the individual models - bits := (posSlot >> 1) - 1 - dist = (2 | (posSlot & 1)) << bits - var u uint32 - if posSlot < endPosModel { - tc := &dc.posModel[posSlot-startPosModel] - if u, err = tc.Decode(d); err != nil { - return 0, err - } - dist += u - return dist, nil - } - - // posSlots use direct encoding and a single model for the four align - // bits. 
- dic := directCodec(bits - alignBits) - if u, err = dic.Decode(d); err != nil { - return 0, err - } - dist += u << alignBits - if u, err = dc.alignCodec.Decode(d); err != nil { - return 0, err - } - dist += u - return dist, nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/encoder.go deleted file mode 100644 index 5ed057a7..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/encoder.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "fmt" - "io" -) - -// opLenMargin provides the upper limit of the number of bytes required -// to encode a single operation. -const opLenMargin = 16 - -// compressFlags control the compression process. -type compressFlags uint32 - -// Values for compressFlags. -const ( - // all data should be compressed, even if compression is not - // optimal. - all compressFlags = 1 << iota -) - -// encoderFlags provide the flags for an encoder. -type encoderFlags uint32 - -// Flags for the encoder. -const ( - // eosMarker requests an EOS marker to be written. - eosMarker encoderFlags = 1 << iota -) - -// Encoder compresses data buffered in the encoder dictionary and writes -// it into a byte writer. -type encoder struct { - dict *encoderDict - state *state - re *rangeEncoder - start int64 - // generate eos marker - marker bool - limit bool - margin int -} - -// newEncoder creates a new encoder. If the byte writer must be -// limited use LimitedByteWriter provided by this package. The flags -// argument supports the eosMarker flag, controlling whether a -// terminating end-of-stream marker must be written. 
-func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, - flags encoderFlags) (e *encoder, err error) { - - re, err := newRangeEncoder(bw) - if err != nil { - return nil, err - } - e = &encoder{ - dict: dict, - state: state, - re: re, - marker: flags&eosMarker != 0, - start: dict.Pos(), - margin: opLenMargin, - } - if e.marker { - e.margin += 5 - } - return e, nil -} - -// Write writes the bytes from p into the dictionary. If not enough -// space is available the data in the dictionary buffer will be -// compressed to make additional space available. If the limit of the -// underlying writer has been reached ErrLimit will be returned. -func (e *encoder) Write(p []byte) (n int, err error) { - for { - k, err := e.dict.Write(p[n:]) - n += k - if err == ErrNoSpace { - if err = e.compress(0); err != nil { - return n, err - } - continue - } - return n, err - } -} - -// Reopen reopens the encoder with a new byte writer. -func (e *encoder) Reopen(bw io.ByteWriter) error { - var err error - if e.re, err = newRangeEncoder(bw); err != nil { - return err - } - e.start = e.dict.Pos() - e.limit = false - return nil -} - -// writeLiteral writes a literal into the LZMA stream -func (e *encoder) writeLiteral(l lit) error { - var err error - state, state2, _ := e.state.states(e.dict.Pos()) - if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { - return err - } - litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) - match := e.dict.ByteAt(int(e.state.rep[0]) + 1) - err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) - if err != nil { - return err - } - e.state.updateStateLiteral() - return nil -} - -// iverson implements the Iverson operator as proposed by Donald Knuth in his -// book Concrete Mathematics. 
-func iverson(ok bool) uint32 { - if ok { - return 1 - } - return 0 -} - -// writeMatch writes a repetition operation into the operation stream -func (e *encoder) writeMatch(m match) error { - var err error - if !(minDistance <= m.distance && m.distance <= maxDistance) { - panic(fmt.Errorf("match distance %d out of range", m.distance)) - } - dist := uint32(m.distance - minDistance) - if !(minMatchLen <= m.n && m.n <= maxMatchLen) && - !(dist == e.state.rep[0] && m.n == 1) { - panic(fmt.Errorf( - "match length %d out of range; dist %d rep[0] %d", - m.n, dist, e.state.rep[0])) - } - state, state2, posState := e.state.states(e.dict.Pos()) - if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { - return err - } - g := 0 - for ; g < 4; g++ { - if e.state.rep[g] == dist { - break - } - } - b := iverson(g < 4) - if err = e.state.isRep[state].Encode(e.re, b); err != nil { - return err - } - n := uint32(m.n - minMatchLen) - if b == 0 { - // simple match - e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = - e.state.rep[2], e.state.rep[1], e.state.rep[0], dist - e.state.updateStateMatch() - if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { - return err - } - return e.state.distCodec.Encode(e.re, dist, n) - } - b = iverson(g != 0) - if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { - return err - } - if b == 0 { - // g == 0 - b = iverson(m.n != 1) - if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { - return err - } - if b == 0 { - e.state.updateStateShortRep() - return nil - } - } else { - // g in {1,2,3} - b = iverson(g != 1) - if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { - return err - } - if b == 1 { - // g in {2,3} - b = iverson(g != 2) - err = e.state.isRepG2[state].Encode(e.re, b) - if err != nil { - return err - } - if b == 1 { - e.state.rep[3] = e.state.rep[2] - } - e.state.rep[2] = e.state.rep[1] - } - e.state.rep[1] = e.state.rep[0] - e.state.rep[0] = dist - } - 
e.state.updateStateRep() - return e.state.repLenCodec.Encode(e.re, n, posState) -} - -// writeOp writes a single operation to the range encoder. The function -// checks whether there is enough space available to close the LZMA -// stream. -func (e *encoder) writeOp(op operation) error { - if e.re.Available() < int64(e.margin) { - return ErrLimit - } - switch x := op.(type) { - case lit: - return e.writeLiteral(x) - case match: - return e.writeMatch(x) - default: - panic("unexpected operation") - } -} - -// compress compressed data from the dictionary buffer. If the flag all -// is set, all data in the dictionary buffer will be compressed. The -// function returns ErrLimit if the underlying writer has reached its -// limit. -func (e *encoder) compress(flags compressFlags) error { - n := 0 - if flags&all == 0 { - n = maxMatchLen - 1 - } - d := e.dict - m := d.m - for d.Buffered() > n { - op := m.NextOp(e.state.rep) - if err := e.writeOp(op); err != nil { - return err - } - d.Discard(op.Len()) - } - return nil -} - -// eosMatch is a pseudo operation that indicates the end of the stream. -var eosMatch = match{distance: maxDistance, n: minMatchLen} - -// Close terminates the LZMA stream. If requested the end-of-stream -// marker will be written. If the byte writer limit has been or will be -// reached during compression of the remaining data in the buffer the -// LZMA stream will be closed and data will remain in the buffer. -func (e *encoder) Close() error { - err := e.compress(all) - if err != nil && err != ErrLimit { - return err - } - if e.marker { - if err := e.writeMatch(eosMatch); err != nil { - return err - } - } - err = e.re.Close() - return err -} - -// Compressed returns the number bytes of the input data that been -// compressed. 
-func (e *encoder) Compressed() int64 { - return e.dict.Pos() - e.start -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go deleted file mode 100644 index 056f8975..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - "io" -) - -// matcher is an interface that supports the identification of the next -// operation. -type matcher interface { - io.Writer - SetDict(d *encoderDict) - NextOp(rep [4]uint32) operation -} - -// encoderDict provides the dictionary of the encoder. It includes an -// additional buffer atop of the actual dictionary. -type encoderDict struct { - buf buffer - m matcher - head int64 - capacity int - // preallocated array - data [maxMatchLen]byte -} - -// newEncoderDict creates the encoder dictionary. The argument bufSize -// defines the size of the additional buffer. -func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { - if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { - return nil, errors.New( - "lzma: dictionary capacity out of range") - } - if bufSize < 1 { - return nil, errors.New( - "lzma: buffer size must be larger than zero") - } - d = &encoderDict{ - buf: *newBuffer(dictCap + bufSize), - capacity: dictCap, - m: m, - } - m.SetDict(d) - return d, nil -} - -// Discard discards n bytes. Note that n must not be larger than -// MaxMatchLen. -func (d *encoderDict) Discard(n int) { - p := d.data[:n] - k, _ := d.buf.Read(p) - if k < n { - panic(fmt.Errorf("lzma: can't discard %d bytes", n)) - } - d.head += int64(n) - d.m.Write(p) -} - -// Len returns the data available in the encoder dictionary. 
-func (d *encoderDict) Len() int { - n := d.buf.Available() - if int64(n) > d.head { - return int(d.head) - } - return n -} - -// DictLen returns the actual length of data in the dictionary. -func (d *encoderDict) DictLen() int { - if d.head < int64(d.capacity) { - return int(d.head) - } - return d.capacity -} - -// Available returns the number of bytes that can be written by a -// following Write call. -func (d *encoderDict) Available() int { - return d.buf.Available() - d.DictLen() -} - -// Write writes data into the dictionary buffer. Note that the position -// of the dictionary head will not be moved. If there is not enough -// space in the buffer ErrNoSpace will be returned. -func (d *encoderDict) Write(p []byte) (n int, err error) { - m := d.Available() - if len(p) > m { - p = p[:m] - err = ErrNoSpace - } - var e error - if n, e = d.buf.Write(p); e != nil { - err = e - } - return n, err -} - -// Pos returns the position of the head. -func (d *encoderDict) Pos() int64 { return d.head } - -// ByteAt returns the byte at the given distance. -func (d *encoderDict) ByteAt(distance int) byte { - if !(0 < distance && distance <= d.Len()) { - return 0 - } - i := d.buf.rear - distance - if i < 0 { - i += len(d.buf.data) - } - return d.buf.data[i] -} - -// CopyN copies the last n bytes from the dictionary into the provided -// writer. This is used for copying uncompressed data into an -// uncompressed segment. -func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { - if n <= 0 { - return 0, nil - } - m := d.Len() - if n > m { - n = m - err = ErrNoSpace - } - i := d.buf.rear - n - var e error - if i < 0 { - i += len(d.buf.data) - if written, e = w.Write(d.buf.data[i:]); e != nil { - return written, e - } - i = 0 - } - var k int - k, e = w.Write(d.buf.data[i:d.buf.rear]) - written += k - if e != nil { - err = e - } - return written, err -} - -// Buffered returns the number of bytes in the buffer. 
-func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/mantle/vendor/github.com/ulikunitz/xz/lzma/fox.lzma deleted file mode 100644 index 5edad633266eb5173a7c39761dc8b9e71efbfe80..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 67 zcma!LU}#|Y4+RWbQXGqzRntCtR~%i$`d{za%}WYWYfXMUl6~Q5_UjH?=5CuO0w(I5 UuQ#VXelz{mI_3ZW`W7$%0HEw6g#Z8m diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/hashtable.go deleted file mode 100644 index 0fb7910b..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/hashtable.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - - "github.com/ulikunitz/xz/internal/hash" -) - -/* For compression we need to find byte sequences that match the byte - * sequence at the dictionary head. A hash table is a simple method to - * provide this capability. - */ - -// maxMatches limits the number of matches requested from the Matches -// function. This controls the speed of the overall encoding. -const maxMatches = 16 - -// shortDists defines the number of short distances supported by the -// implementation. -const shortDists = 8 - -// The minimum is somehow arbitrary but the maximum is limited by the -// memory requirements of the hash table. -const ( - minTableExponent = 9 - maxTableExponent = 20 -) - -// newRoller contains the function used to create an instance of the -// hash.Roller. -var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } - -// hashTable stores the hash table including the rolling hash method. -// -// We implement chained hashing into a circular buffer. 
Each entry in -// the circular buffer stores the delta distance to the next position with a -// word that has the same hash value. -type hashTable struct { - dict *encoderDict - // actual hash table - t []int64 - // circular list data with the offset to the next word - data []uint32 - front int - // mask for computing the index for the hash table - mask uint64 - // hash offset; initial value is -int64(wordLen) - hoff int64 - // length of the hashed word - wordLen int - // hash roller for computing the hash values for the Write - // method - wr hash.Roller - // hash roller for computing arbitrary hashes - hr hash.Roller - // preallocated slices - p [maxMatches]int64 - distances [maxMatches + shortDists]int -} - -// hashTableExponent derives the hash table exponent from the dictionary -// capacity. -func hashTableExponent(n uint32) int { - e := 30 - nlz32(n) - switch { - case e < minTableExponent: - e = minTableExponent - case e > maxTableExponent: - e = maxTableExponent - } - return e -} - -// newHashTable creates a new hash table for words of length wordLen -func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { - if !(0 < capacity) { - return nil, errors.New( - "newHashTable: capacity must not be negative") - } - exp := hashTableExponent(uint32(capacity)) - if !(1 <= wordLen && wordLen <= 4) { - return nil, errors.New("newHashTable: " + - "argument wordLen out of range") - } - n := 1 << uint(exp) - if n <= 0 { - panic("newHashTable: exponent is too large") - } - t = &hashTable{ - t: make([]int64, n), - data: make([]uint32, capacity), - mask: (uint64(1) << uint(exp)) - 1, - hoff: -int64(wordLen), - wordLen: wordLen, - wr: newRoller(wordLen), - hr: newRoller(wordLen), - } - return t, nil -} - -func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } - -// buffered returns the number of bytes that are currently hashed. 
-func (t *hashTable) buffered() int { - n := t.hoff + 1 - switch { - case n <= 0: - return 0 - case n >= int64(len(t.data)): - return len(t.data) - } - return int(n) -} - -// addIndex adds n to an index ensuring that is stays inside the -// circular buffer for the hash chain. -func (t *hashTable) addIndex(i, n int) int { - i += n - len(t.data) - if i < 0 { - i += len(t.data) - } - return i -} - -// putDelta puts the delta instance at the current front of the circular -// chain buffer. -func (t *hashTable) putDelta(delta uint32) { - t.data[t.front] = delta - t.front = t.addIndex(t.front, 1) -} - -// putEntry puts a new entry into the hash table. If there is already a -// value stored it is moved into the circular chain buffer. -func (t *hashTable) putEntry(h uint64, pos int64) { - if pos < 0 { - return - } - i := h & t.mask - old := t.t[i] - 1 - t.t[i] = pos + 1 - var delta int64 - if old >= 0 { - delta = pos - old - if delta > 1<<32-1 || delta > int64(t.buffered()) { - delta = 0 - } - } - t.putDelta(uint32(delta)) -} - -// WriteByte converts a single byte into a hash and puts them into the hash -// table. -func (t *hashTable) WriteByte(b byte) error { - h := t.wr.RollByte(b) - t.hoff++ - t.putEntry(h, t.hoff) - return nil -} - -// Write converts the bytes provided into hash tables and stores the -// abbreviated offsets into the hash table. The method will never return an -// error. -func (t *hashTable) Write(p []byte) (n int, err error) { - for _, b := range p { - // WriteByte doesn't generate an error. - t.WriteByte(b) - } - return len(p), nil -} - -// getMatches the matches for a specific hash. The functions returns the -// number of positions found. -// -// TODO: Make a getDistances because that we are actually interested in. 
-func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { - if t.hoff < 0 || len(positions) == 0 { - return 0 - } - buffered := t.buffered() - tailPos := t.hoff + 1 - int64(buffered) - rear := t.front - buffered - if rear >= 0 { - rear -= len(t.data) - } - // get the slot for the hash - pos := t.t[h&t.mask] - 1 - delta := pos - tailPos - for { - if delta < 0 { - return n - } - positions[n] = tailPos + delta - n++ - if n >= len(positions) { - return n - } - i := rear + int(delta) - if i < 0 { - i += len(t.data) - } - u := t.data[i] - if u == 0 { - return n - } - delta -= int64(u) - } -} - -// hash computes the rolling hash for the word stored in p. For correct -// results its length must be equal to t.wordLen. -func (t *hashTable) hash(p []byte) uint64 { - var h uint64 - for _, b := range p { - h = t.hr.RollByte(b) - } - return h -} - -// Matches fills the positions slice with potential matches. The -// functions returns the number of positions filled into positions. The -// byte slice p must have word length of the hash table. -func (t *hashTable) Matches(p []byte, positions []int64) int { - if len(p) != t.wordLen { - panic(fmt.Errorf( - "byte slice must have length %d", t.wordLen)) - } - h := t.hash(p) - return t.getMatches(h, positions) -} - -// NextOp identifies the next operation using the hash table. -// -// TODO: Use all repetitions to find matches. 
-func (t *hashTable) NextOp(rep [4]uint32) operation { - // get positions - data := t.dict.data[:maxMatchLen] - n, _ := t.dict.buf.Peek(data) - data = data[:n] - var p []int64 - if n < t.wordLen { - p = t.p[:0] - } else { - p = t.p[:maxMatches] - n = t.Matches(data[:t.wordLen], p) - p = p[:n] - } - - // convert positions in potential distances - head := t.dict.head - dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) - for _, pos := range p { - dis := int(head - pos) - if dis > shortDists { - dists = append(dists, dis) - } - } - - // check distances - var m match - dictLen := t.dict.DictLen() - for _, dist := range dists { - if dist > dictLen { - continue - } - - // Here comes a trick. We are only interested in matches - // that are longer than the matches we have been found - // before. So before we test the whole byte sequence at - // the given distance, we test the first byte that would - // make the match longer. If it doesn't match the byte - // to match, we don't to care any longer. - i := t.dict.buf.rear - dist + m.n - if i < 0 { - i += len(t.dict.buf.data) - } - if t.dict.buf.data[i] != data[m.n] { - // We can't get a longer match. Jump to the next - // distance. - continue - } - - n := t.dict.buf.matchLen(dist, data) - switch n { - case 0: - continue - case 1: - if uint32(dist-minDistance) != rep[0] { - continue - } - } - if n > m.n { - m = match{int64(dist), n} - if n == len(data) { - // No better match will be found. - break - } - } - } - - if m.n == 0 { - return lit{data[0]} - } - return m -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/header.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/header.go deleted file mode 100644 index 04276c81..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/header.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package lzma - -import ( - "errors" - "fmt" -) - -// uint32LE reads an uint32 integer from a byte slice -func uint32LE(b []byte) uint32 { - x := uint32(b[3]) << 24 - x |= uint32(b[2]) << 16 - x |= uint32(b[1]) << 8 - x |= uint32(b[0]) - return x -} - -// uint64LE converts the uint64 value stored as little endian to an uint64 -// value. -func uint64LE(b []byte) uint64 { - x := uint64(b[7]) << 56 - x |= uint64(b[6]) << 48 - x |= uint64(b[5]) << 40 - x |= uint64(b[4]) << 32 - x |= uint64(b[3]) << 24 - x |= uint64(b[2]) << 16 - x |= uint64(b[1]) << 8 - x |= uint64(b[0]) - return x -} - -// putUint32LE puts an uint32 integer into a byte slice that must have at least -// a length of 4 bytes. -func putUint32LE(b []byte, x uint32) { - b[0] = byte(x) - b[1] = byte(x >> 8) - b[2] = byte(x >> 16) - b[3] = byte(x >> 24) -} - -// putUint64LE puts the uint64 value into the byte slice as little endian -// value. The byte slice b must have at least place for 8 bytes. -func putUint64LE(b []byte, x uint64) { - b[0] = byte(x) - b[1] = byte(x >> 8) - b[2] = byte(x >> 16) - b[3] = byte(x >> 24) - b[4] = byte(x >> 32) - b[5] = byte(x >> 40) - b[6] = byte(x >> 48) - b[7] = byte(x >> 56) -} - -// noHeaderSize defines the value of the length field in the LZMA header. -const noHeaderSize uint64 = 1<<64 - 1 - -// HeaderLen provides the length of the LZMA file header. -const HeaderLen = 13 - -// header represents the header of an LZMA file. -type header struct { - properties Properties - dictCap int - // uncompressed size; negative value if no size is given - size int64 -} - -// marshalBinary marshals the header. 
-func (h *header) marshalBinary() (data []byte, err error) { - if err = h.properties.verify(); err != nil { - return nil, err - } - if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { - return nil, fmt.Errorf("lzma: DictCap %d out of range", - h.dictCap) - } - - data = make([]byte, 13) - - // property byte - data[0] = h.properties.Code() - - // dictionary capacity - putUint32LE(data[1:5], uint32(h.dictCap)) - - // uncompressed size - var s uint64 - if h.size > 0 { - s = uint64(h.size) - } else { - s = noHeaderSize - } - putUint64LE(data[5:], s) - - return data, nil -} - -// unmarshalBinary unmarshals the header. -func (h *header) unmarshalBinary(data []byte) error { - if len(data) != HeaderLen { - return errors.New("lzma.unmarshalBinary: data has wrong length") - } - - // properties - var err error - if h.properties, err = PropertiesForCode(data[0]); err != nil { - return err - } - - // dictionary capacity - h.dictCap = int(uint32LE(data[1:])) - if h.dictCap < 0 { - return errors.New( - "LZMA header: dictionary capacity exceeds maximum " + - "integer") - } - - // uncompressed size - s := uint64LE(data[5:]) - if s == noHeaderSize { - h.size = -1 - } else { - h.size = int64(s) - if h.size < 0 { - return errors.New( - "LZMA header: uncompressed size " + - "out of int64 range") - } - } - - return nil -} - -// validDictCap checks whether the dictionary capacity is correct. This -// is used to weed out wrong file headers. -func validDictCap(dictcap int) bool { - if int64(dictcap) == MaxDictCap { - return true - } - for n := uint(10); n < 32; n++ { - if dictcap == 1<= 10 or 2^32-1. If -// there is an explicit size it must not exceed 256 GiB. The length of -// the data argument must be HeaderLen. 
-func ValidHeader(data []byte) bool { - var h header - if err := h.unmarshalBinary(data); err != nil { - return false - } - if !validDictCap(h.dictCap) { - return false - } - return h.size < 0 || h.size <= 1<<38 -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/header2.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/header2.go deleted file mode 100644 index be54dd85..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" - "io" -) - -const ( - // maximum size of compressed data in a chunk - maxCompressed = 1 << 16 - // maximum size of uncompressed data in a chunk - maxUncompressed = 1 << 21 -) - -// chunkType represents the type of an LZMA2 chunk. Note that this -// value is an internal representation and no actual encoding of a LZMA2 -// chunk header. -type chunkType byte - -// Possible values for the chunk type. -const ( - // end of stream - cEOS chunkType = iota - // uncompressed; reset dictionary - cUD - // uncompressed; no reset of dictionary - cU - // LZMA compressed; no reset - cL - // LZMA compressed; reset state - cLR - // LZMA compressed; reset state; new property value - cLRN - // LZMA compressed; reset state; new property value; reset dictionary - cLRND -) - -// chunkTypeStrings provide a string representation for the chunk types. -var chunkTypeStrings = [...]string{ - cEOS: "EOS", - cU: "U", - cUD: "UD", - cL: "L", - cLR: "LR", - cLRN: "LRN", - cLRND: "LRND", -} - -// String returns a string representation of the chunk type. -func (c chunkType) String() string { - if !(cEOS <= c && c <= cLRND) { - return "unknown" - } - return chunkTypeStrings[c] -} - -// Actual encodings for the chunk types in the value. 
Note that the high -// uncompressed size bits are stored in the header byte additionally. -const ( - hEOS = 0 - hUD = 1 - hU = 2 - hL = 1 << 7 - hLR = 1<<7 | 1<<5 - hLRN = 1<<7 | 1<<6 - hLRND = 1<<7 | 1<<6 | 1<<5 -) - -// errHeaderByte indicates an unsupported value for the chunk header -// byte. These bytes starts the variable-length chunk header. -var errHeaderByte = errors.New("lzma: unsupported chunk header byte") - -// headerChunkType converts the header byte into a chunk type. It -// ignores the uncompressed size bits in the chunk header byte. -func headerChunkType(h byte) (c chunkType, err error) { - if h&hL == 0 { - // no compression - switch h { - case hEOS: - c = cEOS - case hUD: - c = cUD - case hU: - c = cU - default: - return 0, errHeaderByte - } - return - } - switch h & hLRND { - case hL: - c = cL - case hLR: - c = cLR - case hLRN: - c = cLRN - case hLRND: - c = cLRND - default: - return 0, errHeaderByte - } - return -} - -// uncompressedHeaderLen provides the length of an uncompressed header -const uncompressedHeaderLen = 3 - -// headerLen returns the length of the LZMA2 header for a given chunk -// type. -func headerLen(c chunkType) int { - switch c { - case cEOS: - return 1 - case cU, cUD: - return uncompressedHeaderLen - case cL, cLR: - return 5 - case cLRN, cLRND: - return 6 - } - panic(fmt.Errorf("unsupported chunk type %d", c)) -} - -// chunkHeader represents the contents of a chunk header. -type chunkHeader struct { - ctype chunkType - uncompressed uint32 - compressed uint16 - props Properties -} - -// String returns a string representation of the chunk header. -func (h *chunkHeader) String() string { - return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, - h.compressed, &h.props) -} - -// UnmarshalBinary reads the content of the chunk header from the data -// slice. The slice must have the correct length. 
-func (h *chunkHeader) UnmarshalBinary(data []byte) error { - if len(data) == 0 { - return errors.New("no data") - } - c, err := headerChunkType(data[0]) - if err != nil { - return err - } - - n := headerLen(c) - if len(data) < n { - return errors.New("incomplete data") - } - if len(data) > n { - return errors.New("invalid data length") - } - - *h = chunkHeader{ctype: c} - if c == cEOS { - return nil - } - - h.uncompressed = uint32(uint16BE(data[1:3])) - if c <= cU { - return nil - } - h.uncompressed |= uint32(data[0]&^hLRND) << 16 - - h.compressed = uint16BE(data[3:5]) - if c <= cLR { - return nil - } - - h.props, err = PropertiesForCode(data[5]) - return err -} - -// MarshalBinary encodes the chunk header value. The function checks -// whether the content of the chunk header is correct. -func (h *chunkHeader) MarshalBinary() (data []byte, err error) { - if h.ctype > cLRND { - return nil, errors.New("invalid chunk type") - } - if err = h.props.verify(); err != nil { - return nil, err - } - - data = make([]byte, headerLen(h.ctype)) - - switch h.ctype { - case cEOS: - return data, nil - case cUD: - data[0] = hUD - case cU: - data[0] = hU - case cL: - data[0] = hL - case cLR: - data[0] = hLR - case cLRN: - data[0] = hLRN - case cLRND: - data[0] = hLRND - } - - putUint16BE(data[1:3], uint16(h.uncompressed)) - if h.ctype <= cU { - return data, nil - } - data[0] |= byte(h.uncompressed>>16) &^ hLRND - - putUint16BE(data[3:5], h.compressed) - if h.ctype <= cLR { - return data, nil - } - - data[5] = h.props.Code() - return data, nil -} - -// readChunkHeader reads the chunk header from the IO reader. 
-func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { - p := make([]byte, 1, 6) - if _, err = io.ReadFull(r, p); err != nil { - return - } - c, err := headerChunkType(p[0]) - if err != nil { - return - } - p = p[:headerLen(c)] - if _, err = io.ReadFull(r, p[1:]); err != nil { - return - } - h = new(chunkHeader) - if err = h.UnmarshalBinary(p); err != nil { - return nil, err - } - return h, nil -} - -// uint16BE converts a big-endian uint16 representation to an uint16 -// value. -func uint16BE(p []byte) uint16 { - return uint16(p[0])<<8 | uint16(p[1]) -} - -// putUint16BE puts the big-endian uint16 presentation into the given -// slice. -func putUint16BE(p []byte, x uint16) { - p[0] = byte(x >> 8) - p[1] = byte(x) -} - -// chunkState is used to manage the state of the chunks -type chunkState byte - -// start and stop define the initial and terminating state of the chunk -// state -const ( - start chunkState = 'S' - stop chunkState = 'T' -) - -// errors for the chunk state handling -var ( - errChunkType = errors.New("lzma: unexpected chunk type") - errState = errors.New("lzma: wrong chunk state") -) - -// next transitions state based on chunk type input -func (c *chunkState) next(ctype chunkType) error { - switch *c { - // start state - case 'S': - switch ctype { - case cEOS: - *c = 'T' - case cUD: - *c = 'R' - case cLRND: - *c = 'L' - default: - return errChunkType - } - // normal LZMA mode - case 'L': - switch ctype { - case cEOS: - *c = 'T' - case cUD: - *c = 'R' - case cU: - *c = 'U' - case cL, cLR, cLRN, cLRND: - break - default: - return errChunkType - } - // reset required - case 'R': - switch ctype { - case cEOS: - *c = 'T' - case cUD, cU: - break - case cLRN, cLRND: - *c = 'L' - default: - return errChunkType - } - // uncompressed - case 'U': - switch ctype { - case cEOS: - *c = 'T' - case cUD: - *c = 'R' - case cU: - break - case cL, cLR, cLRN, cLRND: - *c = 'L' - default: - return errChunkType - } - // terminal state - case 'T': - return 
errChunkType - default: - return errState - } - return nil -} - -// defaultChunkType returns the default chunk type for each chunk state. -func (c chunkState) defaultChunkType() chunkType { - switch c { - case 'S': - return cLRND - case 'L', 'U': - return cL - case 'R': - return cLRN - default: - // no error - return cEOS - } -} - -// maxDictCap defines the maximum dictionary capacity supported by the -// LZMA2 dictionary capacity encoding. -const maxDictCap = 1<<32 - 1 - -// maxDictCapCode defines the maximum dictionary capacity code. -const maxDictCapCode = 40 - -// The function decodes the dictionary capacity byte, but doesn't change -// for the correct range of the given byte. -func decodeDictCap(c byte) int64 { - return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) -} - -// DecodeDictCap decodes the encoded dictionary capacity. The function -// returns an error if the code is out of range. -func DecodeDictCap(c byte) (n int64, err error) { - if c >= maxDictCapCode { - if c == maxDictCapCode { - return maxDictCap, nil - } - return 0, errors.New("lzma: invalid dictionary size code") - } - return decodeDictCap(c), nil -} - -// EncodeDictCap encodes a dictionary capacity. The function returns the -// code for the capacity that is greater or equal n. If n exceeds the -// maximum support dictionary capacity, the maximum value is returned. -func EncodeDictCap(n int64) byte { - a, b := byte(0), byte(40) - for a < b { - c := a + (b-a)>>1 - m := decodeDictCap(c) - if n <= m { - if n == m { - return c - } - b = c - } else { - a = c + 1 - } - } - return a -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go deleted file mode 100644 index 6e0edfc8..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import "errors" - -// maxPosBits defines the number of bits of the position value that are used to -// to compute the posState value. The value is used to select the tree codec -// for length encoding and decoding. -const maxPosBits = 4 - -// minMatchLen and maxMatchLen give the minimum and maximum values for -// encoding and decoding length values. minMatchLen is also used as base -// for the encoded length values. -const ( - minMatchLen = 2 - maxMatchLen = minMatchLen + 16 + 256 - 1 -) - -// lengthCodec support the encoding of the length value. -type lengthCodec struct { - choice [2]prob - low [1 << maxPosBits]treeCodec - mid [1 << maxPosBits]treeCodec - high treeCodec -} - -// deepcopy initializes the lc value as deep copy of the source value. -func (lc *lengthCodec) deepcopy(src *lengthCodec) { - if lc == src { - return - } - lc.choice = src.choice - for i := range lc.low { - lc.low[i].deepcopy(&src.low[i]) - } - for i := range lc.mid { - lc.mid[i].deepcopy(&src.mid[i]) - } - lc.high.deepcopy(&src.high) -} - -// init initializes a new length codec. -func (lc *lengthCodec) init() { - for i := range lc.choice { - lc.choice[i] = probInit - } - for i := range lc.low { - lc.low[i] = makeTreeCodec(3) - } - for i := range lc.mid { - lc.mid[i] = makeTreeCodec(3) - } - lc.high = makeTreeCodec(8) -} - -// Encode encodes the length offset. The length offset l can be compute by -// subtracting minMatchLen (2) from the actual length. 
-// -// l = length - minMatchLen -// -func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, -) (err error) { - if l > maxMatchLen-minMatchLen { - return errors.New("lengthCodec.Encode: l out of range") - } - if l < 8 { - if err = lc.choice[0].Encode(e, 0); err != nil { - return - } - return lc.low[posState].Encode(e, l) - } - if err = lc.choice[0].Encode(e, 1); err != nil { - return - } - if l < 16 { - if err = lc.choice[1].Encode(e, 0); err != nil { - return - } - return lc.mid[posState].Encode(e, l-8) - } - if err = lc.choice[1].Encode(e, 1); err != nil { - return - } - if err = lc.high.Encode(e, l-16); err != nil { - return - } - return nil -} - -// Decode reads the length offset. Add minMatchLen to compute the actual length -// to the length offset l. -func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, -) (l uint32, err error) { - var b uint32 - if b, err = lc.choice[0].Decode(d); err != nil { - return - } - if b == 0 { - l, err = lc.low[posState].Decode(d) - return - } - if b, err = lc.choice[1].Decode(d); err != nil { - return - } - if b == 0 { - l, err = lc.mid[posState].Decode(d) - l += 8 - return - } - l, err = lc.high.Decode(d) - l += 16 - return -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go deleted file mode 100644 index 0bfc763c..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// literalCodec supports the encoding of literal. It provides 768 probability -// values per literal state. The upper 512 probabilities are used with the -// context of a match bit. -type literalCodec struct { - probs []prob -} - -// deepcopy initializes literal codec c as a deep copy of the source. 
-func (c *literalCodec) deepcopy(src *literalCodec) { - if c == src { - return - } - c.probs = make([]prob, len(src.probs)) - copy(c.probs, src.probs) -} - -// init initializes the literal codec. -func (c *literalCodec) init(lc, lp int) { - switch { - case !(minLC <= lc && lc <= maxLC): - panic("lc out of range") - case !(minLP <= lp && lp <= maxLP): - panic("lp out of range") - } - c.probs = make([]prob, 0x300<= 7 { - m := uint32(match) - for { - matchBit := (m >> 7) & 1 - m <<= 1 - bit := (r >> 7) & 1 - r <<= 1 - i := ((1 + matchBit) << 8) | symbol - if err = probs[i].Encode(e, bit); err != nil { - return - } - symbol = (symbol << 1) | bit - if matchBit != bit { - break - } - if symbol >= 0x100 { - break - } - } - } - for symbol < 0x100 { - bit := (r >> 7) & 1 - r <<= 1 - if err = probs[symbol].Encode(e, bit); err != nil { - return - } - symbol = (symbol << 1) | bit - } - return nil -} - -// Decode decodes a literal byte using the range decoder as well as the LZMA -// state, a match byte, and the literal state. -func (c *literalCodec) Decode(d *rangeDecoder, - state uint32, match byte, litState uint32, -) (s byte, err error) { - k := litState * 0x300 - probs := c.probs[k : k+0x300] - symbol := uint32(1) - if state >= 7 { - m := uint32(match) - for { - matchBit := (m >> 7) & 1 - m <<= 1 - i := ((1 + matchBit) << 8) | symbol - bit, err := d.DecodeBit(&probs[i]) - if err != nil { - return 0, err - } - symbol = (symbol << 1) | bit - if matchBit != bit { - break - } - if symbol >= 0x100 { - break - } - } - } - for symbol < 0x100 { - bit, err := d.DecodeBit(&probs[symbol]) - if err != nil { - return 0, err - } - symbol = (symbol << 1) | bit - } - s = byte(symbol - 0x100) - return s, nil -} - -// minLC and maxLC define the range for LC values. -const ( - minLC = 0 - maxLC = 8 -) - -// minLC and maxLC define the range for LP values. 
-const ( - minLP = 0 - maxLP = 4 -) diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go deleted file mode 100644 index 96ebda0f..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import "errors" - -// MatchAlgorithm identifies an algorithm to find matches in the -// dictionary. -type MatchAlgorithm byte - -// Supported matcher algorithms. -const ( - HashTable4 MatchAlgorithm = iota - BinaryTree -) - -// maStrings are used by the String method. -var maStrings = map[MatchAlgorithm]string{ - HashTable4: "HashTable4", - BinaryTree: "BinaryTree", -} - -// String returns a string representation of the Matcher. -func (a MatchAlgorithm) String() string { - if s, ok := maStrings[a]; ok { - return s - } - return "unknown" -} - -var errUnsupportedMatchAlgorithm = errors.New( - "lzma: unsupported match algorithm value") - -// verify checks whether the matcher value is supported. -func (a MatchAlgorithm) verify() error { - if _, ok := maStrings[a]; !ok { - return errUnsupportedMatchAlgorithm - } - return nil -} - -func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { - switch a { - case HashTable4: - return newHashTable(dictCap, 4) - case BinaryTree: - return newBinTree(dictCap) - } - return nil, errUnsupportedMatchAlgorithm -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/operation.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/operation.go deleted file mode 100644 index 026ce48a..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "fmt" - "unicode" -) - -// operation represents an operation on the dictionary during encoding or -// decoding. -type operation interface { - Len() int -} - -// rep represents a repetition at the given distance and the given length -type match struct { - // supports all possible distance values, including the eos marker - distance int64 - // length - n int -} - -// Len returns the number of bytes matched. -func (m match) Len() int { - return m.n -} - -// String returns a string representation for the repetition. -func (m match) String() string { - return fmt.Sprintf("M{%d,%d}", m.distance, m.n) -} - -// lit represents a single byte literal. -type lit struct { - b byte -} - -// Len returns 1 for the single byte literal. -func (l lit) Len() int { - return 1 -} - -// String returns a string representation for the literal. -func (l lit) String() string { - var c byte - if unicode.IsPrint(rune(l.b)) { - c = l.b - } else { - c = '.' - } - return fmt.Sprintf("L{%c/%02x}", c, l.b) -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/prob.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/prob.go deleted file mode 100644 index 9a2648e0..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/prob.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// movebits defines the number of bits used for the updates of probability -// values. -const movebits = 5 - -// probbits defines the number of bits of a probability value. -const probbits = 11 - -// probInit defines 0.5 as initial value for prob values. -const probInit prob = 1 << (probbits - 1) - -// Type prob represents probabilities. The type can also be used to encode and -// decode single bits. 
-type prob uint16 - -// Dec decreases the probability. The decrease is proportional to the -// probability value. -func (p *prob) dec() { - *p -= *p >> movebits -} - -// Inc increases the probability. The Increase is proportional to the -// difference of 1 and the probability value. -func (p *prob) inc() { - *p += ((1 << probbits) - *p) >> movebits -} - -// Computes the new bound for a given range using the probability value. -func (p prob) bound(r uint32) uint32 { - return (r >> probbits) * uint32(p) -} - -// Bits returns 1. One is the number of bits that can be encoded or decoded -// with a single prob value. -func (p prob) Bits() int { - return 1 -} - -// Encode encodes the least-significant bit of v. Note that the p value will be -// changed. -func (p *prob) Encode(e *rangeEncoder, v uint32) error { - return e.EncodeBit(v, p) -} - -// Decode decodes a single bit. Note that the p value will change. -func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { - return d.DecodeBit(p) -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/properties.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/properties.go deleted file mode 100644 index f229fc9f..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/properties.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "fmt" -) - -// maximum and minimum values for the LZMA properties. -const ( - minPB = 0 - maxPB = 4 -) - -// maxPropertyCode is the possible maximum of a properties code byte. -const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 - -// Properties contains the parameters LC, LP and PB. The parameter LC -// defines the number of literal context bits; parameter LP the number -// of literal position bits and PB the number of position bits. 
-type Properties struct { - LC int - LP int - PB int -} - -// String returns the properties in a string representation. -func (p *Properties) String() string { - return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) -} - -// PropertiesForCode converts a properties code byte into a Properties value. -func PropertiesForCode(code byte) (p Properties, err error) { - if code > maxPropertyCode { - return p, errors.New("lzma: invalid properties code") - } - p.LC = int(code % 9) - code /= 9 - p.LP = int(code % 5) - code /= 5 - p.PB = int(code % 5) - return p, err -} - -// verify checks the properties for correctness. -func (p *Properties) verify() error { - if p == nil { - return errors.New("lzma: properties are nil") - } - if !(minLC <= p.LC && p.LC <= maxLC) { - return errors.New("lzma: lc out of range") - } - if !(minLP <= p.LP && p.LP <= maxLP) { - return errors.New("lzma: lp out of range") - } - if !(minPB <= p.PB && p.PB <= maxPB) { - return errors.New("lzma: pb out of range") - } - return nil -} - -// Code converts the properties to a byte. The function assumes that -// the properties components are all in range. -func (p Properties) Code() byte { - return byte((p.PB*5+p.LP)*9 + p.LC) -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go deleted file mode 100644 index 57f1ab90..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" -) - -// rangeEncoder implements range encoding of single bits. The low value can -// overflow therefore we need uint64. The cache value is used to handle -// overflows. 
-type rangeEncoder struct { - lbw *LimitedByteWriter - nrange uint32 - low uint64 - cacheLen int64 - cache byte -} - -// maxInt64 provides the maximal value of the int64 type -const maxInt64 = 1<<63 - 1 - -// newRangeEncoder creates a new range encoder. -func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { - lbw, ok := bw.(*LimitedByteWriter) - if !ok { - lbw = &LimitedByteWriter{BW: bw, N: maxInt64} - } - return &rangeEncoder{ - lbw: lbw, - nrange: 0xffffffff, - cacheLen: 1}, nil -} - -// Available returns the number of bytes that still can be written. The -// method takes the bytes that will be currently written by Close into -// account. -func (e *rangeEncoder) Available() int64 { - return e.lbw.N - (e.cacheLen + 4) -} - -// writeByte writes a single byte to the underlying writer. An error is -// returned if the limit is reached. The written byte will be counted if -// the underlying writer doesn't return an error. -func (e *rangeEncoder) writeByte(c byte) error { - if e.Available() < 1 { - return ErrLimit - } - return e.lbw.WriteByte(c) -} - -// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. -func (e *rangeEncoder) DirectEncodeBit(b uint32) error { - e.nrange >>= 1 - e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) - - // normalize - const top = 1 << 24 - if e.nrange >= top { - return nil - } - e.nrange <<= 8 - return e.shiftLow() -} - -// EncodeBit encodes the least significant bit of b. The p value will be -// updated by the function depending on the bit encoded. -func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { - bound := p.bound(e.nrange) - if b&1 == 0 { - e.nrange = bound - p.inc() - } else { - e.low += uint64(bound) - e.nrange -= bound - p.dec() - } - - // normalize - const top = 1 << 24 - if e.nrange >= top { - return nil - } - e.nrange <<= 8 - return e.shiftLow() -} - -// Close writes a complete copy of the low value. 
-func (e *rangeEncoder) Close() error { - for i := 0; i < 5; i++ { - if err := e.shiftLow(); err != nil { - return err - } - } - return nil -} - -// shiftLow shifts the low value for 8 bit. The shifted byte is written into -// the byte writer. The cache value is used to handle overflows. -func (e *rangeEncoder) shiftLow() error { - if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { - tmp := e.cache - for { - err := e.writeByte(tmp + byte(e.low>>32)) - if err != nil { - return err - } - tmp = 0xff - e.cacheLen-- - if e.cacheLen <= 0 { - if e.cacheLen < 0 { - panic("negative cacheLen") - } - break - } - } - e.cache = byte(uint32(e.low) >> 24) - } - e.cacheLen++ - e.low = uint64(uint32(e.low) << 8) - return nil -} - -// rangeDecoder decodes single bits of the range encoding stream. -type rangeDecoder struct { - br io.ByteReader - nrange uint32 - code uint32 -} - -// newRangeDecoder initializes a range decoder. It reads five bytes from the -// reader and therefore may return an error. -func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { - d = &rangeDecoder{br: br, nrange: 0xffffffff} - - b, err := d.br.ReadByte() - if err != nil { - return nil, err - } - if b != 0 { - return nil, errors.New("newRangeDecoder: first byte not zero") - } - - for i := 0; i < 4; i++ { - if err = d.updateCode(); err != nil { - return nil, err - } - } - - if d.code >= d.nrange { - return nil, errors.New("newRangeDecoder: d.code >= d.nrange") - } - - return d, nil -} - -// possiblyAtEnd checks whether the decoder may be at the end of the stream. -func (d *rangeDecoder) possiblyAtEnd() bool { - return d.code == 0 -} - -// DirectDecodeBit decodes a bit with probability 1/2. The return value b will -// contain the bit at the least-significant position. All other bits will be -// zero. 
-func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { - d.nrange >>= 1 - d.code -= d.nrange - t := 0 - (d.code >> 31) - d.code += d.nrange & t - b = (t + 1) & 1 - - // d.code will stay less then d.nrange - - // normalize - // assume d.code < d.nrange - const top = 1 << 24 - if d.nrange >= top { - return b, nil - } - d.nrange <<= 8 - // d.code < d.nrange will be maintained - return b, d.updateCode() -} - -// decodeBit decodes a single bit. The bit will be returned at the -// least-significant position. All other bits will be zero. The probability -// value will be updated. -func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { - bound := p.bound(d.nrange) - if d.code < bound { - d.nrange = bound - p.inc() - b = 0 - } else { - d.code -= bound - d.nrange -= bound - p.dec() - b = 1 - } - // normalize - // assume d.code < d.nrange - const top = 1 << 24 - if d.nrange >= top { - return b, nil - } - d.nrange <<= 8 - // d.code < d.nrange will be maintained - return b, d.updateCode() -} - -// updateCode reads a new byte into the code. -func (d *rangeDecoder) updateCode() error { - b, err := d.br.ReadByte() - if err != nil { - return err - } - d.code = (d.code << 8) | uint32(b) - return nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/reader.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/reader.go deleted file mode 100644 index 2ed13c88..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/reader.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package lzma supports the decoding and encoding of LZMA streams. -// Reader and Writer support the classic LZMA format. Reader2 and -// Writer2 support the decoding and encoding of LZMA2 streams. -// -// The package is written completely in Go and doesn't rely on any external -// library. 
-package lzma - -import ( - "errors" - "io" -) - -// ReaderConfig stores the parameters for the reader of the classic LZMA -// format. -type ReaderConfig struct { - DictCap int -} - -// fill converts the zero values of the configuration to the default values. -func (c *ReaderConfig) fill() { - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } -} - -// Verify checks the reader configuration for errors. Zero values will -// be replaced by default values. -func (c *ReaderConfig) Verify() error { - c.fill() - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - return nil -} - -// Reader provides a reader for LZMA files or streams. -type Reader struct { - lzma io.Reader - h header - d *decoder -} - -// NewReader creates a new reader for an LZMA stream using the classic -// format. NewReader reads and checks the header of the LZMA stream. -func NewReader(lzma io.Reader) (r *Reader, err error) { - return ReaderConfig{}.NewReader(lzma) -} - -// NewReader creates a new reader for an LZMA stream in the classic -// format. The function reads and verifies the the header of the LZMA -// stream. 
-func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - data := make([]byte, HeaderLen) - if _, err := io.ReadFull(lzma, data); err != nil { - if err == io.EOF { - return nil, errors.New("lzma: unexpected EOF") - } - return nil, err - } - r = &Reader{lzma: lzma} - if err = r.h.unmarshalBinary(data); err != nil { - return nil, err - } - if r.h.dictCap < MinDictCap { - return nil, errors.New("lzma: dictionary capacity too small") - } - dictCap := r.h.dictCap - if c.DictCap > dictCap { - dictCap = c.DictCap - } - - state := newState(r.h.properties) - dict, err := newDecoderDict(dictCap) - if err != nil { - return nil, err - } - r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) - if err != nil { - return nil, err - } - return r, nil -} - -// EOSMarker indicates that an EOS marker has been encountered. -func (r *Reader) EOSMarker() bool { - return r.d.eosMarker -} - -// Read returns uncompressed data. -func (r *Reader) Read(p []byte) (n int, err error) { - return r.d.Read(p) -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/reader2.go deleted file mode 100644 index de3da37e..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "errors" - "io" - - "github.com/ulikunitz/xz/internal/xlog" -) - -// Reader2Config stores the parameters for the LZMA2 reader. -// format. -type Reader2Config struct { - DictCap int -} - -// fill converts the zero values of the configuration to the default values. -func (c *Reader2Config) fill() { - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } -} - -// Verify checks the reader configuration for errors. 
Zero configuration values -// will be replaced by default values. -func (c *Reader2Config) Verify() error { - c.fill() - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - return nil -} - -// Reader2 supports the reading of LZMA2 chunk sequences. Note that the -// first chunk should have a dictionary reset and the first compressed -// chunk a properties reset. The chunk sequence may not be terminated by -// an end-of-stream chunk. -type Reader2 struct { - r io.Reader - err error - - dict *decoderDict - ur *uncompressedReader - decoder *decoder - chunkReader io.Reader - - cstate chunkState -} - -// NewReader2 creates a reader for an LZMA2 chunk sequence. -func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { - return Reader2Config{}.NewReader2(lzma2) -} - -// NewReader2 creates an LZMA2 reader using the given configuration. -func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - r = &Reader2{r: lzma2, cstate: start} - r.dict, err = newDecoderDict(c.DictCap) - if err != nil { - return nil, err - } - if err = r.startChunk(); err != nil { - r.err = err - } - return r, nil -} - -// uncompressed tests whether the chunk type specifies an uncompressed -// chunk. -func uncompressed(ctype chunkType) bool { - return ctype == cU || ctype == cUD -} - -// startChunk parses a new chunk. 
-func (r *Reader2) startChunk() error { - r.chunkReader = nil - header, err := readChunkHeader(r.r) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - xlog.Debugf("chunk header %v", header) - if err = r.cstate.next(header.ctype); err != nil { - return err - } - if r.cstate == stop { - return io.EOF - } - if header.ctype == cUD || header.ctype == cLRND { - r.dict.Reset() - } - size := int64(header.uncompressed) + 1 - if uncompressed(header.ctype) { - if r.ur != nil { - r.ur.Reopen(r.r, size) - } else { - r.ur = newUncompressedReader(r.r, r.dict, size) - } - r.chunkReader = r.ur - return nil - } - br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) - if r.decoder == nil { - state := newState(header.props) - r.decoder, err = newDecoder(br, state, r.dict, size) - if err != nil { - return err - } - r.chunkReader = r.decoder - return nil - } - switch header.ctype { - case cLR: - r.decoder.State.Reset() - case cLRN, cLRND: - r.decoder.State = newState(header.props) - } - err = r.decoder.Reopen(br, size) - if err != nil { - return err - } - r.chunkReader = r.decoder - return nil -} - -// Read reads data from the LZMA2 chunk sequence. -func (r *Reader2) Read(p []byte) (n int, err error) { - if r.err != nil { - return 0, r.err - } - for n < len(p) { - var k int - k, err = r.chunkReader.Read(p[n:]) - n += k - if err != nil { - if err == io.EOF { - err = r.startChunk() - if err == nil { - continue - } - } - r.err = err - return n, err - } - if k == 0 { - r.err = errors.New("lzma: Reader2 doesn't get data") - return n, r.err - } - } - return n, nil -} - -// EOS returns whether the LZMA2 stream has been terminated by an -// end-of-stream chunk. -func (r *Reader2) EOS() bool { - return r.cstate == stop -} - -// uncompressedReader is used to read uncompressed chunks. 
-type uncompressedReader struct { - lr io.LimitedReader - Dict *decoderDict - eof bool - err error -} - -// newUncompressedReader initializes a new uncompressedReader. -func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { - ur := &uncompressedReader{ - lr: io.LimitedReader{R: r, N: size}, - Dict: dict, - } - return ur -} - -// Reopen reinitializes an uncompressed reader. -func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { - ur.err = nil - ur.eof = false - ur.lr = io.LimitedReader{R: r, N: size} -} - -// fill reads uncompressed data into the dictionary. -func (ur *uncompressedReader) fill() error { - if !ur.eof { - n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) - if err != io.EOF { - return err - } - ur.eof = true - if n > 0 { - return nil - } - } - if ur.lr.N != 0 { - return io.ErrUnexpectedEOF - } - return io.EOF -} - -// Read reads uncompressed data from the limited reader. -func (ur *uncompressedReader) Read(p []byte) (n int, err error) { - if ur.err != nil { - return 0, ur.err - } - for { - var k int - k, err = ur.Dict.Read(p[n:]) - n += k - if n >= len(p) { - return n, nil - } - if err != nil { - break - } - err = ur.fill() - if err != nil { - break - } - } - ur.err = err - return n, err -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/state.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/state.go deleted file mode 100644 index 09d62f7d..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/state.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// states defines the overall state count -const states = 12 - -// State maintains the full state of the operation encoding or decoding -// process. 
-type state struct { - rep [4]uint32 - isMatch [states << maxPosBits]prob - isRepG0Long [states << maxPosBits]prob - isRep [states]prob - isRepG0 [states]prob - isRepG1 [states]prob - isRepG2 [states]prob - litCodec literalCodec - lenCodec lengthCodec - repLenCodec lengthCodec - distCodec distCodec - state uint32 - posBitMask uint32 - Properties Properties -} - -// initProbSlice initializes a slice of probabilities. -func initProbSlice(p []prob) { - for i := range p { - p[i] = probInit - } -} - -// Reset sets all state information to the original values. -func (s *state) Reset() { - p := s.Properties - *s = state{ - Properties: p, - // dict: s.dict, - posBitMask: (uint32(1) << uint(p.PB)) - 1, - } - initProbSlice(s.isMatch[:]) - initProbSlice(s.isRep[:]) - initProbSlice(s.isRepG0[:]) - initProbSlice(s.isRepG1[:]) - initProbSlice(s.isRepG2[:]) - initProbSlice(s.isRepG0Long[:]) - s.litCodec.init(p.LC, p.LP) - s.lenCodec.init() - s.repLenCodec.init() - s.distCodec.init() -} - -// newState creates a new state from the give Properties. -func newState(p Properties) *state { - s := &state{Properties: p} - s.Reset() - return s -} - -// deepcopy initializes s as a deep copy of the source. -func (s *state) deepcopy(src *state) { - if s == src { - return - } - s.rep = src.rep - s.isMatch = src.isMatch - s.isRepG0Long = src.isRepG0Long - s.isRep = src.isRep - s.isRepG0 = src.isRepG0 - s.isRepG1 = src.isRepG1 - s.isRepG2 = src.isRepG2 - s.litCodec.deepcopy(&src.litCodec) - s.lenCodec.deepcopy(&src.lenCodec) - s.repLenCodec.deepcopy(&src.repLenCodec) - s.distCodec.deepcopy(&src.distCodec) - s.state = src.state - s.posBitMask = src.posBitMask - s.Properties = src.Properties -} - -// cloneState creates a new clone of the give state. -func cloneState(src *state) *state { - s := new(state) - s.deepcopy(src) - return s -} - -// updateStateLiteral updates the state for a literal. 
-func (s *state) updateStateLiteral() { - switch { - case s.state < 4: - s.state = 0 - return - case s.state < 10: - s.state -= 3 - return - } - s.state -= 6 -} - -// updateStateMatch updates the state for a match. -func (s *state) updateStateMatch() { - if s.state < 7 { - s.state = 7 - } else { - s.state = 10 - } -} - -// updateStateRep updates the state for a repetition. -func (s *state) updateStateRep() { - if s.state < 7 { - s.state = 8 - } else { - s.state = 11 - } -} - -// updateStateShortRep updates the state for a short repetition. -func (s *state) updateStateShortRep() { - if s.state < 7 { - s.state = 9 - } else { - s.state = 11 - } -} - -// states computes the states of the operation codec. -func (s *state) states(dictHead int64) (state1, state2, posState uint32) { - state1 = s.state - posState = uint32(dictHead) & s.posBitMask - state2 = (s.state << maxPosBits) | posState - return -} - -// litState computes the literal state. -func (s *state) litState(prev byte, dictHead int64) uint32 { - lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) - litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | - (uint32(prev) >> (8 - lc)) - return litState -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go deleted file mode 100644 index 6e927e93..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -// treeCodec encodes or decodes values with a fixed bit size. It is using a -// tree of probability value. The root of the tree is the most-significant bit. -type treeCodec struct { - probTree -} - -// makeTreeCodec makes a tree codec. The bits value must be inside the range -// [1,32]. 
-func makeTreeCodec(bits int) treeCodec { - return treeCodec{makeProbTree(bits)} -} - -// deepcopy initializes tc as a deep copy of the source. -func (tc *treeCodec) deepcopy(src *treeCodec) { - tc.probTree.deepcopy(&src.probTree) -} - -// Encode uses the range encoder to encode a fixed-bit-size value. -func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { - m := uint32(1) - for i := int(tc.bits) - 1; i >= 0; i-- { - b := (v >> uint(i)) & 1 - if err := e.EncodeBit(b, &tc.probs[m]); err != nil { - return err - } - m = (m << 1) | b - } - return nil -} - -// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may -// be caused by the range decoder. -func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { - m := uint32(1) - for j := 0; j < int(tc.bits); j++ { - b, err := d.DecodeBit(&tc.probs[m]) - if err != nil { - return 0, err - } - m = (m << 1) | b - } - return m - (1 << uint(tc.bits)), nil -} - -// treeReverseCodec is another tree codec, where the least-significant bit is -// the start of the probability tree. -type treeReverseCodec struct { - probTree -} - -// deepcopy initializes the treeReverseCodec as a deep copy of the -// source. -func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { - tc.probTree.deepcopy(&src.probTree) -} - -// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must -// be in the range [1,32]. -func makeTreeReverseCodec(bits int) treeReverseCodec { - return treeReverseCodec{makeProbTree(bits)} -} - -// Encode uses range encoder to encode a fixed-bit-size value. The range -// encoder may cause errors. -func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { - m := uint32(1) - for i := uint(0); i < uint(tc.bits); i++ { - b := (v >> i) & 1 - if err := e.EncodeBit(b, &tc.probs[m]); err != nil { - return err - } - m = (m << 1) | b - } - return nil -} - -// Decodes uses the range decoder to decode a fixed-bit-size value. 
Errors -// returned by the range decoder will be returned. -func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) { - m := uint32(1) - for j := uint(0); j < uint(tc.bits); j++ { - b, err := d.DecodeBit(&tc.probs[m]) - if err != nil { - return 0, err - } - m = (m << 1) | b - v |= b << j - } - return v, nil -} - -// probTree stores enough probability values to be used by the treeEncode and -// treeDecode methods of the range coder types. -type probTree struct { - probs []prob - bits byte -} - -// deepcopy initializes the probTree value as a deep copy of the source. -func (t *probTree) deepcopy(src *probTree) { - if t == src { - return - } - t.probs = make([]prob, len(src.probs)) - copy(t.probs, src.probs) - t.bits = src.bits -} - -// makeProbTree initializes a probTree structure. -func makeProbTree(bits int) probTree { - if !(1 <= bits && bits <= 32) { - panic("bits outside of range [1,32]") - } - t := probTree{ - bits: byte(bits), - probs: make([]prob, 1< 0 { - c.SizeInHeader = true - } - if !c.SizeInHeader { - c.EOSMarker = true - } -} - -// Verify checks WriterConfig for errors. Verify will replace zero -// values with default values. 
-func (c *WriterConfig) Verify() error { - c.fill() - var err error - if c == nil { - return errors.New("lzma: WriterConfig is nil") - } - if c.Properties == nil { - return errors.New("lzma: WriterConfig has no Properties set") - } - if err = c.Properties.verify(); err != nil { - return err - } - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - if !(maxMatchLen <= c.BufSize) { - return errors.New("lzma: lookahead buffer size too small") - } - if c.SizeInHeader { - if c.Size < 0 { - return errors.New("lzma: negative size not supported") - } - } else if !c.EOSMarker { - return errors.New("lzma: EOS marker is required") - } - if err = c.Matcher.verify(); err != nil { - return err - } - - return nil -} - -// header returns the header structure for this configuration. -func (c *WriterConfig) header() header { - h := header{ - properties: *c.Properties, - dictCap: c.DictCap, - size: -1, - } - if c.SizeInHeader { - h.size = c.Size - } - return h -} - -// Writer writes an LZMA stream in the classic format. -type Writer struct { - h header - bw io.ByteWriter - buf *bufio.Writer - e *encoder -} - -// NewWriter creates a new LZMA writer for the classic format. The -// method will write the header to the underlying stream. 
-func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - w = &Writer{h: c.header()} - - var ok bool - w.bw, ok = lzma.(io.ByteWriter) - if !ok { - w.buf = bufio.NewWriter(lzma) - w.bw = w.buf - } - state := newState(w.h.properties) - m, err := c.Matcher.new(w.h.dictCap) - if err != nil { - return nil, err - } - dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) - if err != nil { - return nil, err - } - var flags encoderFlags - if c.EOSMarker { - flags = eosMarker - } - if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { - return nil, err - } - - if err = w.writeHeader(); err != nil { - return nil, err - } - return w, nil -} - -// NewWriter creates a new LZMA writer using the classic format. The -// function writes the header to the underlying stream. -func NewWriter(lzma io.Writer) (w *Writer, err error) { - return WriterConfig{}.NewWriter(lzma) -} - -// writeHeader writes the LZMA header into the stream. -func (w *Writer) writeHeader() error { - data, err := w.h.marshalBinary() - if err != nil { - return err - } - _, err = w.bw.(io.Writer).Write(data) - return err -} - -// Write puts data into the Writer. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.h.size >= 0 { - m := w.h.size - m -= w.e.Compressed() + int64(w.e.dict.Buffered()) - if m < 0 { - m = 0 - } - if m < int64(len(p)) { - p = p[:m] - err = ErrNoSpace - } - } - var werr error - if n, werr = w.e.Write(p); werr != nil { - err = werr - } - return n, err -} - -// Close closes the writer stream. It ensures that all data from the -// buffer will be compressed and the LZMA stream will be finished. 
-func (w *Writer) Close() error { - if w.h.size >= 0 { - n := w.e.Compressed() + int64(w.e.dict.Buffered()) - if n != w.h.size { - return errSize - } - } - err := w.e.Close() - if w.buf != nil { - ferr := w.buf.Flush() - if err == nil { - err = ferr - } - } - return err -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/mantle/vendor/github.com/ulikunitz/xz/lzma/writer2.go deleted file mode 100644 index dfaaec95..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzma/writer2.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package lzma - -import ( - "bytes" - "errors" - "io" -) - -// Writer2Config is used to create a Writer2 using parameters. -type Writer2Config struct { - // The properties for the encoding. If the it is nil the value - // {LC: 3, LP: 0, PB: 2} will be chosen. - Properties *Properties - // The capacity of the dictionary. If DictCap is zero, the value - // 8 MiB will be chosen. - DictCap int - // Size of the lookahead buffer; value 0 indicates default size - // 4096 - BufSize int - // Match algorithm - Matcher MatchAlgorithm -} - -// fill replaces zero values with default values. -func (c *Writer2Config) fill() { - if c.Properties == nil { - c.Properties = &Properties{LC: 3, LP: 0, PB: 2} - } - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } - if c.BufSize == 0 { - c.BufSize = 4096 - } -} - -// Verify checks the Writer2Config for correctness. Zero values will be -// replaced by default values. 
-func (c *Writer2Config) Verify() error { - c.fill() - var err error - if c == nil { - return errors.New("lzma: WriterConfig is nil") - } - if c.Properties == nil { - return errors.New("lzma: WriterConfig has no Properties set") - } - if err = c.Properties.verify(); err != nil { - return err - } - if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { - return errors.New("lzma: dictionary capacity is out of range") - } - if !(maxMatchLen <= c.BufSize) { - return errors.New("lzma: lookahead buffer size too small") - } - if c.Properties.LC+c.Properties.LP > 4 { - return errors.New("lzma: sum of lc and lp exceeds 4") - } - if err = c.Matcher.verify(); err != nil { - return err - } - return nil -} - -// Writer2 supports the creation of an LZMA2 stream. But note that -// written data is buffered, so call Flush or Close to write data to the -// underlying writer. The Close method writes the end-of-stream marker -// to the stream. So you may be able to concatenate the output of two -// writers as long the output of the first writer has only been flushed -// but not closed. -// -// Any change to the fields Properties, DictCap must be done before the -// first call to Write, Flush or Close. -type Writer2 struct { - w io.Writer - - start *state - encoder *encoder - - cstate chunkState - ctype chunkType - - buf bytes.Buffer - lbw LimitedByteWriter -} - -// NewWriter2 creates an LZMA2 chunk sequence writer with the default -// parameters and options. -func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { - return Writer2Config{}.NewWriter2(lzma2) -} - -// NewWriter2 creates a new LZMA2 writer using the given configuration. 
-func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - w = &Writer2{ - w: lzma2, - start: newState(*c.Properties), - cstate: start, - ctype: start.defaultChunkType(), - } - w.buf.Grow(maxCompressed) - w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} - m, err := c.Matcher.new(c.DictCap) - if err != nil { - return nil, err - } - d, err := newEncoderDict(c.DictCap, c.BufSize, m) - if err != nil { - return nil, err - } - w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) - if err != nil { - return nil, err - } - return w, nil -} - -// written returns the number of bytes written to the current chunk -func (w *Writer2) written() int { - if w.encoder == nil { - return 0 - } - return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() -} - -// errClosed indicates that the writer is closed. -var errClosed = errors.New("lzma: writer closed") - -// Writes data to LZMA2 stream. Note that written data will be buffered. -// Use Flush or Close to ensure that data is written to the underlying -// writer. -func (w *Writer2) Write(p []byte) (n int, err error) { - if w.cstate == stop { - return 0, errClosed - } - for n < len(p) { - m := maxUncompressed - w.written() - if m <= 0 { - panic("lzma: maxUncompressed reached") - } - var q []byte - if n+m < len(p) { - q = p[n : n+m] - } else { - q = p[n:] - } - k, err := w.encoder.Write(q) - n += k - if err != nil && err != ErrLimit { - return n, err - } - if err == ErrLimit || k == m { - if err = w.flushChunk(); err != nil { - return n, err - } - } - } - return n, nil -} - -// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 -// stream. 
-func (w *Writer2) writeUncompressedChunk() error { - u := w.encoder.Compressed() - if u <= 0 { - return errors.New("lzma: can't write empty uncompressed chunk") - } - if u > maxUncompressed { - panic("overrun of uncompressed data limit") - } - switch w.ctype { - case cLRND: - w.ctype = cUD - default: - w.ctype = cU - } - w.encoder.state = w.start - - header := chunkHeader{ - ctype: w.ctype, - uncompressed: uint32(u - 1), - } - hdata, err := header.MarshalBinary() - if err != nil { - return err - } - if _, err = w.w.Write(hdata); err != nil { - return err - } - _, err = w.encoder.dict.CopyN(w.w, int(u)) - return err -} - -// writeCompressedChunk writes a compressed chunk to the underlying -// writer. -func (w *Writer2) writeCompressedChunk() error { - if w.ctype == cU || w.ctype == cUD { - panic("chunk type uncompressed") - } - - u := w.encoder.Compressed() - if u <= 0 { - return errors.New("writeCompressedChunk: empty chunk") - } - if u > maxUncompressed { - panic("overrun of uncompressed data limit") - } - c := w.buf.Len() - if c <= 0 { - panic("no compressed data") - } - if c > maxCompressed { - panic("overrun of compressed data limit") - } - header := chunkHeader{ - ctype: w.ctype, - uncompressed: uint32(u - 1), - compressed: uint16(c - 1), - props: w.encoder.state.Properties, - } - hdata, err := header.MarshalBinary() - if err != nil { - return err - } - if _, err = w.w.Write(hdata); err != nil { - return err - } - _, err = io.Copy(w.w, &w.buf) - return err -} - -// writes a single chunk to the underlying writer. -func (w *Writer2) writeChunk() error { - u := int(uncompressedHeaderLen + w.encoder.Compressed()) - c := headerLen(w.ctype) + w.buf.Len() - if u < c { - return w.writeUncompressedChunk() - } - return w.writeCompressedChunk() -} - -// flushChunk terminates the current chunk. The encoder will be reset -// to support the next chunk. 
-func (w *Writer2) flushChunk() error { - if w.written() == 0 { - return nil - } - var err error - if err = w.encoder.Close(); err != nil { - return err - } - if err = w.writeChunk(); err != nil { - return err - } - w.buf.Reset() - w.lbw.N = maxCompressed - if err = w.encoder.Reopen(&w.lbw); err != nil { - return err - } - if err = w.cstate.next(w.ctype); err != nil { - return err - } - w.ctype = w.cstate.defaultChunkType() - w.start = cloneState(w.encoder.state) - return nil -} - -// Flush writes all buffered data out to the underlying stream. This -// could result in multiple chunks to be created. -func (w *Writer2) Flush() error { - if w.cstate == stop { - return errClosed - } - for w.written() > 0 { - if err := w.flushChunk(); err != nil { - return err - } - } - return nil -} - -// Close terminates the LZMA2 stream with an EOS chunk. -func (w *Writer2) Close() error { - if w.cstate == stop { - return errClosed - } - if err := w.Flush(); err != nil { - return nil - } - // write zero byte EOS chunk - _, err := w.w.Write([]byte{0}) - if err != nil { - return err - } - w.cstate = stop - return nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/lzmafilter.go b/mantle/vendor/github.com/ulikunitz/xz/lzmafilter.go deleted file mode 100644 index 4f1bb339..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/lzmafilter.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xz - -import ( - "errors" - "fmt" - "io" - - "github.com/ulikunitz/xz/lzma" -) - -// LZMA filter constants. -const ( - lzmaFilterID = 0x21 - lzmaFilterLen = 3 -) - -// lzmaFilter declares the LZMA2 filter information stored in an xz -// block header. -type lzmaFilter struct { - dictCap int64 -} - -// String returns a representation of the LZMA filter. 
-func (f lzmaFilter) String() string { - return fmt.Sprintf("LZMA dict cap %#x", f.dictCap) -} - -// id returns the ID for the LZMA2 filter. -func (f lzmaFilter) id() uint64 { return lzmaFilterID } - -// MarshalBinary converts the lzmaFilter in its encoded representation. -func (f lzmaFilter) MarshalBinary() (data []byte, err error) { - c := lzma.EncodeDictCap(f.dictCap) - return []byte{lzmaFilterID, 1, c}, nil -} - -// UnmarshalBinary unmarshals the given data representation of the LZMA2 -// filter. -func (f *lzmaFilter) UnmarshalBinary(data []byte) error { - if len(data) != lzmaFilterLen { - return errors.New("xz: data for LZMA2 filter has wrong length") - } - if data[0] != lzmaFilterID { - return errors.New("xz: wrong LZMA2 filter id") - } - if data[1] != 1 { - return errors.New("xz: wrong LZMA2 filter size") - } - dc, err := lzma.DecodeDictCap(data[2]) - if err != nil { - return errors.New("xz: wrong LZMA2 dictionary size property") - } - - f.dictCap = dc - return nil -} - -// reader creates a new reader for the LZMA2 filter. -func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, - err error) { - - config := new(lzma.Reader2Config) - if c != nil { - config.DictCap = c.DictCap - } - dc := int(f.dictCap) - if dc < 1 { - return nil, errors.New("xz: LZMA2 filter parameter " + - "dictionary capacity overflow") - } - if dc > config.DictCap { - config.DictCap = dc - } - - fr, err = config.NewReader2(r) - if err != nil { - return nil, err - } - return fr, nil -} - -// writeCloser creates a io.WriteCloser for the LZMA2 filter. 
-func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, -) (fw io.WriteCloser, err error) { - config := new(lzma.Writer2Config) - if c != nil { - *config = lzma.Writer2Config{ - Properties: c.Properties, - DictCap: c.DictCap, - BufSize: c.BufSize, - Matcher: c.Matcher, - } - } - - dc := int(f.dictCap) - if dc < 1 { - return nil, errors.New("xz: LZMA2 filter parameter " + - "dictionary capacity overflow") - } - if dc > config.DictCap { - config.DictCap = dc - } - - fw, err = config.NewWriter2(w) - if err != nil { - return nil, err - } - return fw, nil -} - -// last returns true, because an LZMA2 filter must be the last filter in -// the filter list. -func (f lzmaFilter) last() bool { return true } diff --git a/mantle/vendor/github.com/ulikunitz/xz/make-docs b/mantle/vendor/github.com/ulikunitz/xz/make-docs deleted file mode 100644 index a8c612ce..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/make-docs +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -set -x -pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md -pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/mantle/vendor/github.com/ulikunitz/xz/none-check.go b/mantle/vendor/github.com/ulikunitz/xz/none-check.go deleted file mode 100644 index 95240135..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/none-check.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package xz - -import "hash" - -type noneHash struct{} - -func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } - -func (h noneHash) Sum(b []byte) []byte { return b } - -func (h noneHash) Reset() {} - -func (h noneHash) Size() int { return 0 } - -func (h noneHash) BlockSize() int { return 0 } - -func newNoneHash() hash.Hash { - return &noneHash{} -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/reader.go b/mantle/vendor/github.com/ulikunitz/xz/reader.go deleted file mode 100644 index 7f974ffc..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/reader.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package xz supports the compression and decompression of xz files. It -// supports version 1.0.4 of the specification without the non-LZMA2 -// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt -package xz - -import ( - "bytes" - "errors" - "fmt" - "hash" - "io" - - "github.com/ulikunitz/xz/internal/xlog" - "github.com/ulikunitz/xz/lzma" -) - -// ReaderConfig defines the parameters for the xz reader. The -// SingleStream parameter requests the reader to assume that the -// underlying stream contains only a single stream. -type ReaderConfig struct { - DictCap int - SingleStream bool -} - -// Verify checks the reader parameters for Validity. Zero values will be -// replaced by default values. -func (c *ReaderConfig) Verify() error { - if c == nil { - return errors.New("xz: reader parameters are nil") - } - lc := lzma.Reader2Config{DictCap: c.DictCap} - if err := lc.Verify(); err != nil { - return err - } - return nil -} - -// Reader supports the reading of one or multiple xz streams. 
-type Reader struct { - ReaderConfig - - xz io.Reader - sr *streamReader -} - -// streamReader decodes a single xz stream -type streamReader struct { - ReaderConfig - - xz io.Reader - br *blockReader - newHash func() hash.Hash - h header - index []record -} - -// NewReader creates a new xz reader using the default parameters. -// The function reads and checks the header of the first XZ stream. The -// reader will process multiple streams including padding. -func NewReader(xz io.Reader) (r *Reader, err error) { - return ReaderConfig{}.NewReader(xz) -} - -// NewReader creates an xz stream reader. The created reader will be -// able to process multiple streams and padding unless a SingleStream -// has been set in the reader configuration c. -func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - r = &Reader{ - ReaderConfig: c, - xz: xz, - } - if r.sr, err = c.newStreamReader(xz); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - return r, nil -} - -var errUnexpectedData = errors.New("xz: unexpected data after stream") - -// Read reads uncompressed data from the stream. -func (r *Reader) Read(p []byte) (n int, err error) { - for n < len(p) { - if r.sr == nil { - if r.SingleStream { - data := make([]byte, 1) - _, err = io.ReadFull(r.xz, data) - if err != io.EOF { - return n, errUnexpectedData - } - return n, io.EOF - } - for { - r.sr, err = r.ReaderConfig.newStreamReader(r.xz) - if err != errPadding { - break - } - } - if err != nil { - return n, err - } - } - k, err := r.sr.Read(p[n:]) - n += k - if err != nil { - if err == io.EOF { - r.sr = nil - continue - } - return n, err - } - } - return n, nil -} - -var errPadding = errors.New("xz: padding (4 zero bytes) encountered") - -// newStreamReader creates a new xz stream reader using the given configuration -// parameters. NewReader reads and checks the header of the xz stream. 
-func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - data := make([]byte, HeaderLen) - if _, err := io.ReadFull(xz, data[:4]); err != nil { - return nil, err - } - if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { - return nil, errPadding - } - if _, err = io.ReadFull(xz, data[4:]); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - r = &streamReader{ - ReaderConfig: c, - xz: xz, - index: make([]record, 0, 4), - } - if err = r.h.UnmarshalBinary(data); err != nil { - return nil, err - } - xlog.Debugf("xz header %s", r.h) - if r.newHash, err = newHashFunc(r.h.flags); err != nil { - return nil, err - } - return r, nil -} - -// readTail reads the index body and the xz footer. -func (r *streamReader) readTail() error { - index, n, err := readIndexBody(r.xz, len(r.index)) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - - for i, rec := range r.index { - if rec != index[i] { - return fmt.Errorf("xz: record %d is %v; want %v", - i, rec, index[i]) - } - } - - p := make([]byte, footerLen) - if _, err = io.ReadFull(r.xz, p); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - var f footer - if err = f.UnmarshalBinary(p); err != nil { - return err - } - xlog.Debugf("xz footer %s", f) - if f.flags != r.h.flags { - return errors.New("xz: footer flags incorrect") - } - if f.indexSize != int64(n)+1 { - return errors.New("xz: index size in footer wrong") - } - return nil -} - -// Read reads actual data from the xz stream. 
-func (r *streamReader) Read(p []byte) (n int, err error) { - for n < len(p) { - if r.br == nil { - bh, hlen, err := readBlockHeader(r.xz) - if err != nil { - if err == errIndexIndicator { - if err = r.readTail(); err != nil { - return n, err - } - return n, io.EOF - } - return n, err - } - xlog.Debugf("block %v", *bh) - r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, - hlen, r.newHash()) - if err != nil { - return n, err - } - } - k, err := r.br.Read(p[n:]) - n += k - if err != nil { - if err == io.EOF { - r.index = append(r.index, r.br.record()) - r.br = nil - } else { - return n, err - } - } - } - return n, nil -} - -// countingReader is a reader that counts the bytes read. -type countingReader struct { - r io.Reader - n int64 -} - -// Read reads data from the wrapped reader and adds it to the n field. -func (lr *countingReader) Read(p []byte) (n int, err error) { - n, err = lr.r.Read(p) - lr.n += int64(n) - return n, err -} - -// blockReader supports the reading of a block. -type blockReader struct { - lxz countingReader - header *blockHeader - headerLen int - n int64 - hash hash.Hash - r io.Reader -} - -// newBlockReader creates a new block reader. -func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, - hlen int, hash hash.Hash) (br *blockReader, err error) { - - br = &blockReader{ - lxz: countingReader{r: xz}, - header: h, - headerLen: hlen, - hash: hash, - } - - fr, err := c.newFilterReader(&br.lxz, h.filters) - if err != nil { - return nil, err - } - if br.hash.Size() != 0 { - br.r = io.TeeReader(fr, br.hash) - } else { - br.r = fr - } - - return br, nil -} - -// uncompressedSize returns the uncompressed size of the block. -func (br *blockReader) uncompressedSize() int64 { - return br.n -} - -// compressedSize returns the compressed size of the block. -func (br *blockReader) compressedSize() int64 { - return br.lxz.n -} - -// unpaddedSize computes the unpadded size for the block. 
-func (br *blockReader) unpaddedSize() int64 { - n := int64(br.headerLen) - n += br.compressedSize() - n += int64(br.hash.Size()) - return n -} - -// record returns the index record for the current block. -func (br *blockReader) record() record { - return record{br.unpaddedSize(), br.uncompressedSize()} -} - -// Read reads data from the block. -func (br *blockReader) Read(p []byte) (n int, err error) { - n, err = br.r.Read(p) - br.n += int64(n) - - u := br.header.uncompressedSize - if u >= 0 && br.uncompressedSize() > u { - return n, errors.New("xz: wrong uncompressed size for block") - } - c := br.header.compressedSize - if c >= 0 && br.compressedSize() > c { - return n, errors.New("xz: wrong compressed size for block") - } - if err != io.EOF { - return n, err - } - if br.uncompressedSize() < u || br.compressedSize() < c { - return n, io.ErrUnexpectedEOF - } - - s := br.hash.Size() - k := padLen(br.lxz.n) - q := make([]byte, k+s, k+2*s) - if _, err = io.ReadFull(br.lxz.r, q); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return n, err - } - if !allZeros(q[:k]) { - return n, errors.New("xz: non-zero block padding") - } - checkSum := q[k:] - computedSum := br.hash.Sum(checkSum[s:]) - if !bytes.Equal(checkSum, computedSum) { - return n, errors.New("xz: checksum error for block") - } - return n, io.EOF -} - -func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader, - err error) { - - if err = verifyFilters(f); err != nil { - return nil, err - } - - fr = r - for i := len(f) - 1; i >= 0; i-- { - fr, err = f[i].reader(fr, c) - if err != nil { - return nil, err - } - } - return fr, nil -} diff --git a/mantle/vendor/github.com/ulikunitz/xz/writer.go b/mantle/vendor/github.com/ulikunitz/xz/writer.go deleted file mode 100644 index 6b3a6662..00000000 --- a/mantle/vendor/github.com/ulikunitz/xz/writer.go +++ /dev/null @@ -1,399 +0,0 @@ -// Copyright 2014-2021 Ulrich Kunitz. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package xz - -import ( - "errors" - "fmt" - "hash" - "io" - - "github.com/ulikunitz/xz/lzma" -) - -// WriterConfig describe the parameters for an xz writer. -type WriterConfig struct { - Properties *lzma.Properties - DictCap int - BufSize int - BlockSize int64 - // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) - CheckSum byte - // Forces NoChecksum (default: false) - NoCheckSum bool - // match algorithm - Matcher lzma.MatchAlgorithm -} - -// fill replaces zero values with default values. -func (c *WriterConfig) fill() { - if c.Properties == nil { - c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2} - } - if c.DictCap == 0 { - c.DictCap = 8 * 1024 * 1024 - } - if c.BufSize == 0 { - c.BufSize = 4096 - } - if c.BlockSize == 0 { - c.BlockSize = maxInt64 - } - if c.CheckSum == 0 { - c.CheckSum = CRC64 - } - if c.NoCheckSum { - c.CheckSum = None - } -} - -// Verify checks the configuration for errors. Zero values will be -// replaced by default values. -func (c *WriterConfig) Verify() error { - if c == nil { - return errors.New("xz: writer configuration is nil") - } - c.fill() - lc := lzma.Writer2Config{ - Properties: c.Properties, - DictCap: c.DictCap, - BufSize: c.BufSize, - Matcher: c.Matcher, - } - if err := lc.Verify(); err != nil { - return err - } - if c.BlockSize <= 0 { - return errors.New("xz: block size out of range") - } - if err := verifyFlags(c.CheckSum); err != nil { - return err - } - return nil -} - -// filters creates the filter list for the given parameters. -func (c *WriterConfig) filters() []filter { - return []filter{&lzmaFilter{int64(c.DictCap)}} -} - -// maxInt64 defines the maximum 64-bit signed integer. -const maxInt64 = 1<<63 - 1 - -// verifyFilters checks the filter list for the length and the right -// sequence of filters. 
-func verifyFilters(f []filter) error { - if len(f) == 0 { - return errors.New("xz: no filters") - } - if len(f) > 4 { - return errors.New("xz: more than four filters") - } - for _, g := range f[:len(f)-1] { - if g.last() { - return errors.New("xz: last filter is not last") - } - } - if !f[len(f)-1].last() { - return errors.New("xz: wrong last filter") - } - return nil -} - -// newFilterWriteCloser converts a filter list into a WriteCloser that -// can be used by a blockWriter. -func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) { - if err = verifyFilters(f); err != nil { - return nil, err - } - fw = nopWriteCloser(w) - for i := len(f) - 1; i >= 0; i-- { - fw, err = f[i].writeCloser(fw, c) - if err != nil { - return nil, err - } - } - return fw, nil -} - -// nopWCloser implements a WriteCloser with a Close method not doing -// anything. -type nopWCloser struct { - io.Writer -} - -// Close returns nil and doesn't do anything else. -func (c nopWCloser) Close() error { - return nil -} - -// nopWriteCloser converts the Writer into a WriteCloser with a Close -// function that does nothing beside returning nil. -func nopWriteCloser(w io.Writer) io.WriteCloser { - return nopWCloser{w} -} - -// Writer compresses data written to it. It is an io.WriteCloser. -type Writer struct { - WriterConfig - - xz io.Writer - bw *blockWriter - newHash func() hash.Hash - h header - index []record - closed bool -} - -// newBlockWriter creates a new block writer writes the header out. -func (w *Writer) newBlockWriter() error { - var err error - w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash()) - if err != nil { - return err - } - if err = w.bw.writeHeader(w.xz); err != nil { - return err - } - return nil -} - -// closeBlockWriter closes a block writer and records the sizes in the -// index. 
-func (w *Writer) closeBlockWriter() error { - var err error - if err = w.bw.Close(); err != nil { - return err - } - w.index = append(w.index, w.bw.record()) - return nil -} - -// NewWriter creates a new xz writer using default parameters. -func NewWriter(xz io.Writer) (w *Writer, err error) { - return WriterConfig{}.NewWriter(xz) -} - -// NewWriter creates a new Writer using the given configuration parameters. -func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) { - if err = c.Verify(); err != nil { - return nil, err - } - w = &Writer{ - WriterConfig: c, - xz: xz, - h: header{c.CheckSum}, - index: make([]record, 0, 4), - } - if w.newHash, err = newHashFunc(c.CheckSum); err != nil { - return nil, err - } - data, err := w.h.MarshalBinary() - if err != nil { - return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err) - } - if _, err = xz.Write(data); err != nil { - return nil, err - } - if err = w.newBlockWriter(); err != nil { - return nil, err - } - return w, nil - -} - -// Write compresses the uncompressed data provided. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.closed { - return 0, errClosed - } - for { - k, err := w.bw.Write(p[n:]) - n += k - if err != errNoSpace { - return n, err - } - if err = w.closeBlockWriter(); err != nil { - return n, err - } - if err = w.newBlockWriter(); err != nil { - return n, err - } - } -} - -// Close closes the writer and adds the footer to the Writer. Close -// doesn't close the underlying writer. -func (w *Writer) Close() error { - if w.closed { - return errClosed - } - w.closed = true - var err error - if err = w.closeBlockWriter(); err != nil { - return err - } - - f := footer{flags: w.h.flags} - if f.indexSize, err = writeIndex(w.xz, w.index); err != nil { - return err - } - data, err := f.MarshalBinary() - if err != nil { - return err - } - if _, err = w.xz.Write(data); err != nil { - return err - } - return nil -} - -// countingWriter is a writer that counts all data written to it. 
-type countingWriter struct { - w io.Writer - n int64 -} - -// Write writes data to the countingWriter. -func (cw *countingWriter) Write(p []byte) (n int, err error) { - n, err = cw.w.Write(p) - cw.n += int64(n) - if err == nil && cw.n < 0 { - return n, errors.New("xz: counter overflow") - } - return -} - -// blockWriter is writes a single block. -type blockWriter struct { - cxz countingWriter - // mw combines io.WriteCloser w and the hash. - mw io.Writer - w io.WriteCloser - n int64 - blockSize int64 - closed bool - headerLen int - - filters []filter - hash hash.Hash -} - -// newBlockWriter creates a new block writer. -func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) { - bw = &blockWriter{ - cxz: countingWriter{w: xz}, - blockSize: c.BlockSize, - filters: c.filters(), - hash: hash, - } - bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters) - if err != nil { - return nil, err - } - if bw.hash.Size() != 0 { - bw.mw = io.MultiWriter(bw.w, bw.hash) - } else { - bw.mw = bw.w - } - return bw, nil -} - -// writeHeader writes the header. If the function is called after Close -// the commpressedSize and uncompressedSize fields will be filled. -func (bw *blockWriter) writeHeader(w io.Writer) error { - h := blockHeader{ - compressedSize: -1, - uncompressedSize: -1, - filters: bw.filters, - } - if bw.closed { - h.compressedSize = bw.compressedSize() - h.uncompressedSize = bw.uncompressedSize() - } - data, err := h.MarshalBinary() - if err != nil { - return err - } - if _, err = w.Write(data); err != nil { - return err - } - bw.headerLen = len(data) - return nil -} - -// compressed size returns the amount of data written to the underlying -// stream. 
-func (bw *blockWriter) compressedSize() int64 { - return bw.cxz.n -} - -// uncompressedSize returns the number of data written to the -// blockWriter -func (bw *blockWriter) uncompressedSize() int64 { - return bw.n -} - -// unpaddedSize returns the sum of the header length, the uncompressed -// size of the block and the hash size. -func (bw *blockWriter) unpaddedSize() int64 { - if bw.headerLen <= 0 { - panic("xz: block header not written") - } - n := int64(bw.headerLen) - n += bw.compressedSize() - n += int64(bw.hash.Size()) - return n -} - -// record returns the record for the current stream. Call Close before -// calling this method. -func (bw *blockWriter) record() record { - return record{bw.unpaddedSize(), bw.uncompressedSize()} -} - -var errClosed = errors.New("xz: writer already closed") - -var errNoSpace = errors.New("xz: no space") - -// Write writes uncompressed data to the block writer. -func (bw *blockWriter) Write(p []byte) (n int, err error) { - if bw.closed { - return 0, errClosed - } - - t := bw.blockSize - bw.n - if int64(len(p)) > t { - err = errNoSpace - p = p[:t] - } - - var werr error - n, werr = bw.mw.Write(p) - bw.n += int64(n) - if werr != nil { - return n, werr - } - return n, err -} - -// Close closes the writer. 
-func (bw *blockWriter) Close() error { - if bw.closed { - return errClosed - } - bw.closed = true - if err := bw.w.Close(); err != nil { - return err - } - s := bw.hash.Size() - k := padLen(bw.cxz.n) - p := make([]byte, k+s) - bw.hash.Sum(p[k:k]) - if _, err := bw.cxz.w.Write(p); err != nil { - return err - } - return nil -} diff --git a/mantle/vendor/github.com/xeipuuv/gojsonschema/go.mod b/mantle/vendor/github.com/xeipuuv/gojsonschema/go.mod deleted file mode 100644 index b709d7fc..00000000 --- a/mantle/vendor/github.com/xeipuuv/gojsonschema/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/xeipuuv/gojsonschema - -require ( - github.com/stretchr/testify v1.3.0 - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 -) diff --git a/mantle/vendor/github.com/xeipuuv/gojsonschema/go.sum b/mantle/vendor/github.com/xeipuuv/gojsonschema/go.sum deleted file mode 100644 index 0e865ac7..00000000 --- a/mantle/vendor/github.com/xeipuuv/gojsonschema/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= diff --git a/mantle/vendor/go.opencensus.io/go.mod b/mantle/vendor/go.opencensus.io/go.mod deleted file mode 100644 index 29707156..00000000 --- a/mantle/vendor/go.opencensus.io/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module go.opencensus.io - -require ( - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 - github.com/golang/protobuf v1.3.1 - github.com/google/go-cmp v0.3.0 - github.com/stretchr/testify v1.4.0 - golang.org/x/net v0.0.0-20190620200207-3b0461eec859 - golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect - golang.org/x/text v0.3.3 // indirect - google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect - google.golang.org/grpc v1.20.1 -) - -go 1.13 diff --git a/mantle/vendor/go.opencensus.io/go.sum b/mantle/vendor/go.opencensus.io/go.sum deleted file mode 100644 index 01c02972..00000000 --- a/mantle/vendor/go.opencensus.io/go.sum +++ /dev/null @@ -1,74 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 
h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= -google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/mantle/vendor/golang.org/x/crypto/argon2/argon2.go b/mantle/vendor/golang.org/x/crypto/argon2/argon2.go deleted file mode 100644 index b423feae..00000000 --- a/mantle/vendor/golang.org/x/crypto/argon2/argon2.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package argon2 implements the key derivation function Argon2. -// Argon2 was selected as the winner of the Password Hashing Competition and can -// be used to derive cryptographic keys from passwords. -// -// For a detailed specification of Argon2 see [1]. -// -// If you aren't sure which function you need, use Argon2id (IDKey) and -// the parameter recommendations for your scenario. -// -// -// Argon2i -// -// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. -// It uses data-independent memory access, which is preferred for password -// hashing and password-based key derivation. Argon2i requires more passes over -// memory than Argon2id to protect from trade-off attacks. The recommended -// parameters (taken from [2]) for non-interactive operations are time=3 and to -// use the maximum available memory. -// -// -// Argon2id -// -// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining -// Argon2i and Argon2d. 
It uses data-independent memory access for the first -// half of the first iteration over the memory and data-dependent memory access -// for the rest. Argon2id is side-channel resistant and provides better brute- -// force cost savings due to time-memory tradeoffs than Argon2i. The recommended -// parameters for non-interactive operations (taken from [2]) are time=1 and to -// use the maximum available memory. -// -// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf -// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 -package argon2 - -import ( - "encoding/binary" - "sync" - - "golang.org/x/crypto/blake2b" -) - -// The Argon2 version implemented by this package. -const Version = 0x13 - -const ( - argon2d = iota - argon2i - argon2id -) - -// Key derives a key from the password, salt, and cost parameters using Argon2i -// returning a byte slice of length keyLen that can be used as cryptographic -// key. The CPU cost and parallelism degree must be greater than zero. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) -// -// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. -// If using that amount of memory (32 MB) is not possible in some contexts then -// the time parameter can be increased to compensate. -// -// The time parameter specifies the number of passes over the memory and the -// memory parameter specifies the size of the memory in KiB. For example -// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be -// adjusted to the number of available CPUs. The cost parameters should be -// increased as memory latency and CPU parallelism increases. Remember to get a -// good random salt. 
-func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { - return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) -} - -// IDKey derives a key from the password, salt, and cost parameters using -// Argon2id returning a byte slice of length keyLen that can be used as -// cryptographic key. The CPU cost and parallelism degree must be greater than -// zero. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) -// -// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. -// If using that amount of memory (64 MB) is not possible in some contexts then -// the time parameter can be increased to compensate. -// -// The time parameter specifies the number of passes over the memory and the -// memory parameter specifies the size of the memory in KiB. For example -// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be -// adjusted to the numbers of available CPUs. The cost parameters should be -// increased as memory latency and CPU parallelism increases. Remember to get a -// good random salt. 
-func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { - return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) -} - -func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { - if time < 1 { - panic("argon2: number of rounds too small") - } - if threads < 1 { - panic("argon2: parallelism degree too low") - } - h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) - - memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) - if memory < 2*syncPoints*uint32(threads) { - memory = 2 * syncPoints * uint32(threads) - } - B := initBlocks(&h0, memory, uint32(threads)) - processBlocks(B, time, memory, uint32(threads), mode) - return extractKey(B, memory, uint32(threads), keyLen) -} - -const ( - blockLength = 128 - syncPoints = 4 -) - -type block [blockLength]uint64 - -func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { - var ( - h0 [blake2b.Size + 8]byte - params [24]byte - tmp [4]byte - ) - - b2, _ := blake2b.New512(nil) - binary.LittleEndian.PutUint32(params[0:4], threads) - binary.LittleEndian.PutUint32(params[4:8], keyLen) - binary.LittleEndian.PutUint32(params[8:12], memory) - binary.LittleEndian.PutUint32(params[12:16], time) - binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) - binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) - b2.Write(params[:]) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) - b2.Write(tmp[:]) - b2.Write(password) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) - b2.Write(tmp[:]) - b2.Write(salt) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) - b2.Write(tmp[:]) - b2.Write(key) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) - b2.Write(tmp[:]) - b2.Write(data) - b2.Sum(h0[:0]) - return h0 -} - -func initBlocks(h0 
*[blake2b.Size + 8]byte, memory, threads uint32) []block { - var block0 [1024]byte - B := make([]block, memory) - for lane := uint32(0); lane < threads; lane++ { - j := lane * (memory / threads) - binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) - - binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) - blake2bHash(block0[:], h0[:]) - for i := range B[j+0] { - B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) - } - - binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) - blake2bHash(block0[:], h0[:]) - for i := range B[j+1] { - B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) - } - } - return B -} - -func processBlocks(B []block, time, memory, threads uint32, mode int) { - lanes := memory / threads - segments := lanes / syncPoints - - processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { - var addresses, in, zero block - if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { - in[0] = uint64(n) - in[1] = uint64(lane) - in[2] = uint64(slice) - in[3] = uint64(memory) - in[4] = uint64(time) - in[5] = uint64(mode) - } - - index := uint32(0) - if n == 0 && slice == 0 { - index = 2 // we have already generated the first two blocks - if mode == argon2i || mode == argon2id { - in[6]++ - processBlock(&addresses, &in, &zero) - processBlock(&addresses, &addresses, &zero) - } - } - - offset := lane*lanes + slice*segments + index - var random uint64 - for index < segments { - prev := offset - 1 - if index == 0 && slice == 0 { - prev += lanes // last block in lane - } - if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { - if index%blockLength == 0 { - in[6]++ - processBlock(&addresses, &in, &zero) - processBlock(&addresses, &addresses, &zero) - } - random = addresses[index%blockLength] - } else { - random = B[prev][0] - } - newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) - processBlockXOR(&B[offset], &B[prev], &B[newOffset]) - index, offset = index+1, offset+1 - } - 
wg.Done() - } - - for n := uint32(0); n < time; n++ { - for slice := uint32(0); slice < syncPoints; slice++ { - var wg sync.WaitGroup - for lane := uint32(0); lane < threads; lane++ { - wg.Add(1) - go processSegment(n, slice, lane, &wg) - } - wg.Wait() - } - } - -} - -func extractKey(B []block, memory, threads, keyLen uint32) []byte { - lanes := memory / threads - for lane := uint32(0); lane < threads-1; lane++ { - for i, v := range B[(lane*lanes)+lanes-1] { - B[memory-1][i] ^= v - } - } - - var block [1024]byte - for i, v := range B[memory-1] { - binary.LittleEndian.PutUint64(block[i*8:], v) - } - key := make([]byte, keyLen) - blake2bHash(key, block[:]) - return key -} - -func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { - refLane := uint32(rand>>32) % threads - if n == 0 && slice == 0 { - refLane = lane - } - m, s := 3*segments, ((slice+1)%syncPoints)*segments - if lane == refLane { - m += index - } - if n == 0 { - m, s = slice*segments, 0 - if slice == 0 || lane == refLane { - m += index - } - } - if index == 0 || lane == refLane { - m-- - } - return phi(rand, uint64(m), uint64(s), refLane, lanes) -} - -func phi(rand, m, s uint64, lane, lanes uint32) uint32 { - p := rand & 0xFFFFFFFF - p = (p * p) >> 32 - p = (p * m) >> 32 - return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) -} diff --git a/mantle/vendor/golang.org/x/crypto/argon2/blake2b.go b/mantle/vendor/golang.org/x/crypto/argon2/blake2b.go deleted file mode 100644 index 10f46948..00000000 --- a/mantle/vendor/golang.org/x/crypto/argon2/blake2b.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package argon2 - -import ( - "encoding/binary" - "hash" - - "golang.org/x/crypto/blake2b" -) - -// blake2bHash computes an arbitrary long hash value of in -// and writes the hash to out. 
-func blake2bHash(out []byte, in []byte) { - var b2 hash.Hash - if n := len(out); n < blake2b.Size { - b2, _ = blake2b.New(n, nil) - } else { - b2, _ = blake2b.New512(nil) - } - - var buffer [blake2b.Size]byte - binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) - b2.Write(buffer[:4]) - b2.Write(in) - - if len(out) <= blake2b.Size { - b2.Sum(out[:0]) - return - } - - outLen := len(out) - b2.Sum(buffer[:0]) - b2.Reset() - copy(out, buffer[:32]) - out = out[32:] - for len(out) > blake2b.Size { - b2.Write(buffer[:]) - b2.Sum(buffer[:0]) - copy(out, buffer[:32]) - out = out[32:] - b2.Reset() - } - - if outLen%blake2b.Size > 0 { // outLen > 64 - r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 - b2, _ = blake2b.New(outLen-32*r, nil) - } - b2.Write(buffer[:]) - b2.Sum(out[:0]) -} diff --git a/mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.go deleted file mode 100644 index a014ac92..00000000 --- a/mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -package argon2 - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func mixBlocksSSE2(out, a, b, c *block) - -//go:noescape -func xorBlocksSSE2(out, a, b, c *block) - -//go:noescape -func blamkaSSE4(b *block) - -func processBlockSSE(out, in1, in2 *block, xor bool) { - var t block - mixBlocksSSE2(&t, in1, in2, &t) - if useSSE4 { - blamkaSSE4(&t) - } else { - for i := 0; i < blockLength; i += 16 { - blamkaGeneric( - &t[i+0], &t[i+1], &t[i+2], &t[i+3], - &t[i+4], &t[i+5], &t[i+6], &t[i+7], - &t[i+8], &t[i+9], &t[i+10], &t[i+11], - &t[i+12], &t[i+13], &t[i+14], &t[i+15], - ) - } - for i := 0; i < blockLength/8; i += 2 { - blamkaGeneric( - &t[i], &t[i+1], &t[16+i], &t[16+i+1], - &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], - &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], - &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], - ) - } - } - if xor { - xorBlocksSSE2(out, in1, in2, &t) - } else { - mixBlocksSSE2(out, in1, in2, &t) - } -} - -func processBlock(out, in1, in2 *block) { - processBlockSSE(out, in1, in2, false) -} - -func processBlockXOR(out, in1, in2 *block) { - processBlockSSE(out, in1, in2, true) -} diff --git a/mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.s deleted file mode 100644 index b2cc0515..00000000 --- a/mantle/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - 
PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - -// func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 - - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) - - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) - RET - -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 - MOVQ out+0(FP), DX - MOVQ a+8(FP), AX - MOVQ b+16(FP), BX - MOVQ a+24(FP), CX - MOVQ $128, BP - -loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - PXOR X1, X0 - PXOR X2, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, BP - JA loop - RET - -// func xorBlocksSSE2(out, a, b, c *block) -TEXT ·xorBlocksSSE2(SB), 4, $0-32 - MOVQ out+0(FP), DX - MOVQ a+8(FP), AX - MOVQ b+16(FP), BX - MOVQ a+24(FP), CX - MOVQ $128, BP - -loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - MOVOU 0(DX), X3 - PXOR X1, X0 - PXOR X2, X0 - PXOR X3, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, BP - JA loop - RET diff --git a/mantle/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/mantle/vendor/golang.org/x/crypto/argon2/blamka_generic.go deleted file mode 100644 index a481b224..00000000 --- a/mantle/vendor/golang.org/x/crypto/argon2/blamka_generic.go +++ 
/dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package argon2 - -var useSSE4 bool - -func processBlockGeneric(out, in1, in2 *block, xor bool) { - var t block - for i := range t { - t[i] = in1[i] ^ in2[i] - } - for i := 0; i < blockLength; i += 16 { - blamkaGeneric( - &t[i+0], &t[i+1], &t[i+2], &t[i+3], - &t[i+4], &t[i+5], &t[i+6], &t[i+7], - &t[i+8], &t[i+9], &t[i+10], &t[i+11], - &t[i+12], &t[i+13], &t[i+14], &t[i+15], - ) - } - for i := 0; i < blockLength/8; i += 2 { - blamkaGeneric( - &t[i], &t[i+1], &t[16+i], &t[16+i+1], - &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], - &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], - &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], - ) - } - if xor { - for i := range t { - out[i] ^= in1[i] ^ in2[i] ^ t[i] - } - } else { - for i := range t { - out[i] = in1[i] ^ in2[i] ^ t[i] - } - } -} - -func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { - v00, v01, v02, v03 := *t00, *t01, *t02, *t03 - v04, v05, v06, v07 := *t04, *t05, *t06, *t07 - v08, v09, v10, v11 := *t08, *t09, *t10, *t11 - v12, v13, v14, v15 := *t12, *t13, *t14, *t15 - - v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) - v12 ^= v00 - v12 = v12>>32 | v12<<32 - v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) - v04 ^= v08 - v04 = v04>>24 | v04<<40 - - v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) - v12 ^= v00 - v12 = v12>>16 | v12<<48 - v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) - v04 ^= v08 - v04 = v04>>63 | v04<<1 - - v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) - v13 ^= v01 - v13 = v13>>32 | v13<<32 - v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) - v05 ^= v09 - v05 = v05>>24 | v05<<40 - - v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) - v13 ^= v01 - v13 = v13>>16 | v13<<48 - v09 += v13 + 
2*uint64(uint32(v09))*uint64(uint32(v13)) - v05 ^= v09 - v05 = v05>>63 | v05<<1 - - v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) - v14 ^= v02 - v14 = v14>>32 | v14<<32 - v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) - v06 ^= v10 - v06 = v06>>24 | v06<<40 - - v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) - v14 ^= v02 - v14 = v14>>16 | v14<<48 - v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) - v06 ^= v10 - v06 = v06>>63 | v06<<1 - - v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) - v15 ^= v03 - v15 = v15>>32 | v15<<32 - v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) - v07 ^= v11 - v07 = v07>>24 | v07<<40 - - v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) - v15 ^= v03 - v15 = v15>>16 | v15<<48 - v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) - v07 ^= v11 - v07 = v07>>63 | v07<<1 - - v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) - v15 ^= v00 - v15 = v15>>32 | v15<<32 - v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) - v05 ^= v10 - v05 = v05>>24 | v05<<40 - - v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) - v15 ^= v00 - v15 = v15>>16 | v15<<48 - v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) - v05 ^= v10 - v05 = v05>>63 | v05<<1 - - v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) - v12 ^= v01 - v12 = v12>>32 | v12<<32 - v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) - v06 ^= v11 - v06 = v06>>24 | v06<<40 - - v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) - v12 ^= v01 - v12 = v12>>16 | v12<<48 - v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) - v06 ^= v11 - v06 = v06>>63 | v06<<1 - - v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) - v13 ^= v02 - v13 = v13>>32 | v13<<32 - v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) - v07 ^= v08 - v07 = v07>>24 | v07<<40 - - v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) - v13 ^= v02 - v13 = v13>>16 | v13<<48 - v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) - v07 ^= v08 - v07 = 
v07>>63 | v07<<1 - - v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) - v14 ^= v03 - v14 = v14>>32 | v14<<32 - v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) - v04 ^= v09 - v04 = v04>>24 | v04<<40 - - v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) - v14 ^= v03 - v14 = v14>>16 | v14<<48 - v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) - v04 ^= v09 - v04 = v04>>63 | v04<<1 - - *t00, *t01, *t02, *t03 = v00, v01, v02, v03 - *t04, *t05, *t06, *t07 = v04, v05, v06, v07 - *t08, *t09, *t10, *t11 = v08, v09, v10, v11 - *t12, *t13, *t14, *t15 = v12, v13, v14, v15 -} diff --git a/mantle/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/mantle/vendor/golang.org/x/crypto/argon2/blamka_ref.go deleted file mode 100644 index 167c59d2..00000000 --- a/mantle/vendor/golang.org/x/crypto/argon2/blamka_ref.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc -// +build !amd64 purego !gc - -package argon2 - -func processBlock(out, in1, in2 *block) { - processBlockGeneric(out, in1, in2, false) -} - -func processBlockXOR(out, in1, in2 *block) { - processBlockGeneric(out, in1, in2, true) -} diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b.go b/mantle/vendor/golang.org/x/crypto/blake2b/blake2b.go deleted file mode 100644 index d2e98d42..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 -// and the extendable output function (XOF) BLAKE2Xb. -// -// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and -// produces digests of any size between 1 and 64 bytes. 
-// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf -// and for BLAKE2Xb see https://blake2.net/blake2x.pdf -// -// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). -// If you need a secret-key MAC (message authentication code), use the New512 -// function with a non-nil key. -// -// BLAKE2X is a construction to compute hash values larger than 64 bytes. It -// can produce hash values between 0 and 4 GiB. -package blake2b - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - // The blocksize of BLAKE2b in bytes. - BlockSize = 128 - // The hash size of BLAKE2b-512 in bytes. - Size = 64 - // The hash size of BLAKE2b-384 in bytes. - Size384 = 48 - // The hash size of BLAKE2b-256 in bytes. - Size256 = 32 -) - -var ( - useAVX2 bool - useAVX bool - useSSE4 bool -) - -var ( - errKeySize = errors.New("blake2b: invalid key size") - errHashSize = errors.New("blake2b: invalid hash size") -) - -var iv = [8]uint64{ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, -} - -// Sum512 returns the BLAKE2b-512 checksum of the data. -func Sum512(data []byte) [Size]byte { - var sum [Size]byte - checkSum(&sum, Size, data) - return sum -} - -// Sum384 returns the BLAKE2b-384 checksum of the data. -func Sum384(data []byte) [Size384]byte { - var sum [Size]byte - var sum384 [Size384]byte - checkSum(&sum, Size384, data) - copy(sum384[:], sum[:Size384]) - return sum384 -} - -// Sum256 returns the BLAKE2b-256 checksum of the data. -func Sum256(data []byte) [Size256]byte { - var sum [Size]byte - var sum256 [Size256]byte - checkSum(&sum, Size256, data) - copy(sum256[:], sum[:Size256]) - return sum256 -} - -// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. 
-func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } - -// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } - -// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } - -// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. -// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. -// The hash size can be a value between 1 and 64 but it is highly recommended to use -// values equal or greater than: -// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). -// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). -// When the key is nil, the returned hash.Hash implements BinaryMarshaler -// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
-func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } - -func newDigest(hashSize int, key []byte) (*digest, error) { - if hashSize < 1 || hashSize > Size { - return nil, errHashSize - } - if len(key) > Size { - return nil, errKeySize - } - d := &digest{ - size: hashSize, - keyLen: len(key), - } - copy(d.key[:], key) - d.Reset() - return d, nil -} - -func checkSum(sum *[Size]byte, hashSize int, data []byte) { - h := iv - h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) - var c [2]uint64 - - if length := len(data); length > BlockSize { - n := length &^ (BlockSize - 1) - if length == n { - n -= BlockSize - } - hashBlocks(&h, &c, 0, data[:n]) - data = data[n:] - } - - var block [BlockSize]byte - offset := copy(block[:], data) - remaining := uint64(BlockSize - offset) - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h[:(hashSize+7)/8] { - binary.LittleEndian.PutUint64(sum[8*i:], v) - } -} - -type digest struct { - h [8]uint64 - c [2]uint64 - size int - block [BlockSize]byte - offset int - - key [BlockSize]byte - keyLen int -} - -const ( - magic = "b2b" - marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 -) - -func (d *digest) MarshalBinary() ([]byte, error) { - if d.keyLen != 0 { - return nil, errors.New("crypto/blake2b: cannot marshal MACs") - } - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - for i := 0; i < 8; i++ { - b = appendUint64(b, d.h[i]) - } - b = appendUint64(b, d.c[0]) - b = appendUint64(b, d.c[1]) - // Maximum value for size is 64 - b = append(b, byte(d.size)) - b = append(b, d.block[:]...) 
- b = append(b, byte(d.offset)) - return b, nil -} - -func (d *digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("crypto/blake2b: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("crypto/blake2b: invalid hash state size") - } - b = b[len(magic):] - for i := 0; i < 8; i++ { - b, d.h[i] = consumeUint64(b) - } - b, d.c[0] = consumeUint64(b) - b, d.c[1] = consumeUint64(b) - d.size = int(b[0]) - b = b[1:] - copy(d.block[:], b[:BlockSize]) - b = b[BlockSize:] - d.offset = int(b[0]) - return nil -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Size() int { return d.size } - -func (d *digest) Reset() { - d.h = iv - d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) - d.offset, d.c[0], d.c[1] = 0, 0, 0 - if d.keyLen > 0 { - d.block = d.key - d.offset = BlockSize - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - - if d.offset > 0 { - remaining := BlockSize - d.offset - if n <= remaining { - d.offset += copy(d.block[d.offset:], p) - return - } - copy(d.block[d.offset:], p[:remaining]) - hashBlocks(&d.h, &d.c, 0, d.block[:]) - d.offset = 0 - p = p[remaining:] - } - - if length := len(p); length > BlockSize { - nn := length &^ (BlockSize - 1) - if length == nn { - nn -= BlockSize - } - hashBlocks(&d.h, &d.c, 0, p[:nn]) - p = p[nn:] - } - - if len(p) > 0 { - d.offset += copy(d.block[:], p) - } - - return -} - -func (d *digest) Sum(sum []byte) []byte { - var hash [Size]byte - d.finalize(&hash) - return append(sum, hash[:d.size]...) 
-} - -func (d *digest) finalize(hash *[Size]byte) { - var block [BlockSize]byte - copy(block[:], d.block[:d.offset]) - remaining := uint64(BlockSize - d.offset) - - c := d.c - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - h := d.h - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h { - binary.LittleEndian.PutUint64(hash[8*i:], v) - } -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.BigEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func appendUint32(b []byte, x uint32) []byte { - var a [4]byte - binary.BigEndian.PutUint32(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := binary.BigEndian.Uint64(b) - return b[8:], x -} - -func consumeUint32(b []byte) ([]byte, uint32) { - x := binary.BigEndian.Uint32(b) - return b[4:], x -} diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go deleted file mode 100644 index 56bfaaa1..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useAVX2 = cpu.X86.HasAVX2 - useAVX = cpu.X86.HasAVX - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - switch { - case useAVX2: - hashBlocksAVX2(h, c, flag, blocks) - case useAVX: - hashBlocksAVX(h, c, flag, blocks) - case useSSE4: - hashBlocksSSE4(h, c, flag, blocks) - default: - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s deleted file mode 100644 index 4b9daa18..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ /dev/null @@ -1,745 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 && amd64 && gc && !purego -// +build go1.7,amd64,gc,!purego - -#include "textflag.h" - -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; 
BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE 
$0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - 
VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define 
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - -// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ 
blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 - VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) - -loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 - JGE noinc - INCQ R9 - MOVQ R9, 8(DX) - -noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ 
R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) - VZEROUPPER - - RET - -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, 
c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - 
VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) - -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define 
LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 - -// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 - VMOVDQA X0, X8 - VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 - VMOVDQU 16(AX), X11 - VMOVDQU 32(AX), X2 - VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - - VMOVDQA X10, X0 - VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - - VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() - VMOVDQA X12, 16(R10) - VMOVDQA X13, 32(R10) - VMOVDQA X14, 48(R10) - VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA X12, 80(R10) - VMOVDQA X13, 96(R10) - VMOVDQA X14, 112(R10) - VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) - VMOVDQA X12, 144(R10) - VMOVDQA X13, 160(R10) - 
VMOVDQA X14, 176(R10) - VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() - VMOVDQA X12, 208(R10) - VMOVDQA X13, 224(R10) - VMOVDQA X14, 240(R10) - VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, 
X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - VMOVDQU 32(AX), X14 - VMOVDQU 48(AX), X15 - VPXOR X0, X10, X10 - VPXOR X1, X11, X11 - VPXOR X2, X14, X14 - VPXOR X3, X15, X15 - VPXOR X4, X10, X10 - VPXOR X5, X11, X11 - VPXOR X6, X14, X2 - VPXOR X7, X15, X3 - VMOVDQU X2, 32(AX) - VMOVDQU X3, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - VMOVDQU X10, 0(AX) - VMOVDQU X11, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - VZEROUPPER - - RET diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go deleted file mode 100644 index 5fa1b328..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.7 && amd64 && gc && !purego -// +build !go1.7,amd64,gc,!purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s deleted file mode 100644 index ae75eb9a..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -#include "textflag.h" - -DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO 
t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - PADDQ m0, v0; \ - PADDQ m1, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v6, v6; \ - PSHUFD $0xB1, v7, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - PSHUFB c40, v2; \ - PSHUFB c40, v3; \ - PADDQ m2, v0; \ - PADDQ m3, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFB c48, v6; \ - PSHUFB c48, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - MOVOU v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVOU v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), m0; \ - PINSRQ $1, i1*8(src), m0; \ - MOVQ i2*8(src), m1; \ - PINSRQ $1, i3*8(src), m1; \ - MOVQ i4*8(src), m2; \ - PINSRQ $1, i5*8(src), m2; \ - MOVQ i6*8(src), m3; \ - PINSRQ $1, i7*8(src), m3 - -// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) - - MOVOU ·c40<>(SB), X13 - MOVOU ·c48<>(SB), X14 - - MOVOU 0(AX), X12 - MOVOU 
16(AX), X15 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 
2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - RET diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go deleted file mode 100644 index 3168a8aa..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "math/bits" -) - -// the precomputed values for BLAKE2b -// there are 12 16-byte arrays - one for each round -// the entries are calculated from the sigma constants. 
-var precomputed = [12][16]byte{ - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, - {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, - {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, - {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, - {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, - {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, - {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, - {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, - {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second -} - -func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - var m [16]uint64 - c0, c1 := c[0], c[1] - - for i := 0; i < len(blocks); { - c0 += BlockSize - if c0 < BlockSize { - c1++ - } - - v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] - v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] - v12 ^= c0 - v13 ^= c1 - v14 ^= flag - - for j := range m { - m[j] = binary.LittleEndian.Uint64(blocks[i:]) - i += 8 - } - - for j := range precomputed { - s := &(precomputed[j]) - - v0 += m[s[0]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -32) - v8 += v12 - v4 ^= v8 - v4 = bits.RotateLeft64(v4, -24) - v1 += m[s[1]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -32) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -24) - v2 += m[s[2]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -32) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -24) - v3 += m[s[3]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -32) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -24) - - v0 += m[s[4]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -16) - v8 += v12 - v4 ^= v8 - v4 = 
bits.RotateLeft64(v4, -63) - v1 += m[s[5]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -16) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -63) - v2 += m[s[6]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -16) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -63) - v3 += m[s[7]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -16) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -63) - - v0 += m[s[8]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -32) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -24) - v1 += m[s[9]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -32) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -24) - v2 += m[s[10]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -32) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -24) - v3 += m[s[11]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -32) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -24) - - v0 += m[s[12]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -16) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -63) - v1 += m[s[13]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -16) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -63) - v2 += m[s[14]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -16) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -63) - v3 += m[s[15]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -16) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -63) - - } - - h[0] ^= v0 ^ v8 - h[1] ^= v1 ^ v9 - h[2] ^= v2 ^ v10 - h[3] ^= v3 ^ v11 - h[4] ^= v4 ^ v12 - h[5] ^= v5 ^ v13 - h[6] ^= v6 ^ v14 - h[7] ^= v7 ^ v15 - } - c[0], c[1] = c0, c1 -} diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go deleted file mode 100644 index b0137cdf..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc -// +build !amd64 purego !gc - -package blake2b - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - hashBlocksGeneric(h, c, flag, blocks) -} diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/blake2x.go b/mantle/vendor/golang.org/x/crypto/blake2b/blake2x.go deleted file mode 100644 index 52c414db..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/blake2x.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "errors" - "io" -) - -// XOF defines the interface to hash functions that -// support arbitrary-length output. -type XOF interface { - // Write absorbs more data into the hash's state. It panics if called - // after Read. - io.Writer - - // Read reads more output from the hash. It returns io.EOF if the limit - // has been reached. - io.Reader - - // Clone returns a copy of the XOF in its current state. - Clone() XOF - - // Reset resets the XOF to its initial state. - Reset() -} - -// OutputLengthUnknown can be used as the size argument to NewXOF to indicate -// the length of the output is not known in advance. -const OutputLengthUnknown = 0 - -// magicUnknownOutputLength is a magic value for the output size that indicates -// an unknown number of output bytes. -const magicUnknownOutputLength = (1 << 32) - 1 - -// maxOutputLength is the absolute maximum number of bytes to produce when the -// number of output bytes is unknown. -const maxOutputLength = (1 << 32) * 64 - -// NewXOF creates a new variable-output-length hash. The hash either produce a -// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes -// (size == OutputLengthUnknown). 
In the latter case, an absolute limit of -// 256GiB applies. -// -// A non-nil key turns the hash into a MAC. The key must between -// zero and 32 bytes long. -func NewXOF(size uint32, key []byte) (XOF, error) { - if len(key) > Size { - return nil, errKeySize - } - if size == magicUnknownOutputLength { - // 2^32-1 indicates an unknown number of bytes and thus isn't a - // valid length. - return nil, errors.New("blake2b: XOF length too large") - } - if size == OutputLengthUnknown { - size = magicUnknownOutputLength - } - x := &xof{ - d: digest{ - size: Size, - keyLen: len(key), - }, - length: size, - } - copy(x.d.key[:], key) - x.Reset() - return x, nil -} - -type xof struct { - d digest - length uint32 - remaining uint64 - cfg, root, block [Size]byte - offset int - nodeOffset uint32 - readMode bool -} - -func (x *xof) Write(p []byte) (n int, err error) { - if x.readMode { - panic("blake2b: write to XOF after read") - } - return x.d.Write(p) -} - -func (x *xof) Clone() XOF { - clone := *x - return &clone -} - -func (x *xof) Reset() { - x.cfg[0] = byte(Size) - binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length - binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length - x.cfg[17] = byte(Size) // inner hash size - - x.d.Reset() - x.d.h[1] ^= uint64(x.length) << 32 - - x.remaining = uint64(x.length) - if x.remaining == magicUnknownOutputLength { - x.remaining = maxOutputLength - } - x.offset, x.nodeOffset = 0, 0 - x.readMode = false -} - -func (x *xof) Read(p []byte) (n int, err error) { - if !x.readMode { - x.d.finalize(&x.root) - x.readMode = true - } - - if x.remaining == 0 { - return 0, io.EOF - } - - n = len(p) - if uint64(n) > x.remaining { - n = int(x.remaining) - p = p[:n] - } - - if x.offset > 0 { - blockRemaining := Size - x.offset - if n < blockRemaining { - x.offset += copy(p, x.block[x.offset:]) - x.remaining -= uint64(n) - return - } - copy(p, x.block[x.offset:]) - p = p[blockRemaining:] - x.offset = 0 - x.remaining -= 
uint64(blockRemaining) - } - - for len(p) >= Size { - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - copy(p, x.block[:]) - p = p[Size:] - x.remaining -= uint64(Size) - } - - if todo := len(p); todo > 0 { - if x.remaining < uint64(Size) { - x.cfg[0] = byte(x.remaining) - } - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - x.offset = copy(p, x.block[:todo]) - x.remaining -= uint64(todo) - } - return -} - -func (d *digest) initConfig(cfg *[Size]byte) { - d.offset, d.c[0], d.c[1] = 0, 0, 0 - for i := range d.h { - d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) - } -} diff --git a/mantle/vendor/golang.org/x/crypto/blake2b/register.go b/mantle/vendor/golang.org/x/crypto/blake2b/register.go deleted file mode 100644 index 9d863396..00000000 --- a/mantle/vendor/golang.org/x/crypto/blake2b/register.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package blake2b - -import ( - "crypto" - "hash" -) - -func init() { - newHash256 := func() hash.Hash { - h, _ := New256(nil) - return h - } - newHash384 := func() hash.Hash { - h, _ := New384(nil) - return h - } - - newHash512 := func() hash.Hash { - h, _ := New512(nil) - return h - } - - crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) - crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) - crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) -} diff --git a/mantle/vendor/golang.org/x/crypto/cast5/cast5.go b/mantle/vendor/golang.org/x/crypto/cast5/cast5.go deleted file mode 100644 index ddcbeb6f..00000000 --- a/mantle/vendor/golang.org/x/crypto/cast5/cast5.go +++ /dev/null @@ -1,533 +0,0 @@ -// Copyright 2010 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. -// -// CAST5 is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package cast5 // import "golang.org/x/crypto/cast5" - -import "errors" - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, 
r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. 
So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. - -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 
5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 
0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 
0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 
0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 
0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 
0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 
0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 
0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 
0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 
0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 
0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 
0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/mantle/vendor/golang.org/x/crypto/ed25519/ed25519.go b/mantle/vendor/golang.org/x/crypto/ed25519/ed25519.go index 71ad917d..a7828345 100644 --- 
a/mantle/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ b/mantle/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -1,13 +1,7 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// In Go 1.13, the ed25519 package was promoted to the standard library as -// crypto/ed25519, and this package became a wrapper for the standard library one. -// -//go:build !go1.13 -// +build !go1.13 - // Package ed25519 implements the Ed25519 signature algorithm. See // https://ed25519.cr.yp.to/. // @@ -16,21 +10,15 @@ // representation includes a public key suffix to make multiple signing // operations with the same key more efficient. This package refers to the RFC // 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. package ed25519 -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - import ( - "bytes" - "crypto" - cryptorand "crypto/rand" - "crypto/sha512" - "errors" + "crypto/ed25519" "io" - "strconv" - - "golang.org/x/crypto/ed25519/internal/edwards25519" ) const ( @@ -45,57 +33,21 @@ const ( ) // PublicKey is the type of Ed25519 public keys. -type PublicKey []byte +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey // PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -type PrivateKey []byte - -// Public returns the PublicKey corresponding to priv. -func (priv PrivateKey) Public() crypto.PublicKey { - publicKey := make([]byte, PublicKeySize) - copy(publicKey, priv[32:]) - return PublicKey(publicKey) -} - -// Seed returns the private key seed corresponding to priv. 
It is provided for -// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds -// in this package. -func (priv PrivateKey) Seed() []byte { - seed := make([]byte, SeedSize) - copy(seed, priv[:32]) - return seed -} - -// Sign signs the given message with priv. -// Ed25519 performs two passes over messages to be signed and therefore cannot -// handle pre-hashed messages. Thus opts.HashFunc() must return zero to -// indicate the message hasn't been hashed. This can be achieved by passing -// crypto.Hash(0) as the value for opts. -func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { - if opts.HashFunc() != crypto.Hash(0) { - return nil, errors.New("ed25519: cannot sign hashed message") - } - - return Sign(priv, message), nil -} +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey // GenerateKey generates a public/private key pair using entropy from rand. // If rand is nil, crypto/rand.Reader will be used. func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - if rand == nil { - rand = cryptorand.Reader - } - - seed := make([]byte, SeedSize) - if _, err := io.ReadFull(rand, seed); err != nil { - return nil, nil, err - } - - privateKey := NewKeyFromSeed(seed) - publicKey := make([]byte, PublicKeySize) - copy(publicKey, privateKey[32:]) - - return publicKey, privateKey, nil + return ed25519.GenerateKey(rand) } // NewKeyFromSeed calculates a private key from a seed. It will panic if @@ -103,121 +55,17 @@ func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { // with RFC 8032. RFC 8032's private keys correspond to seeds in this // package. 
func NewKeyFromSeed(seed []byte) PrivateKey { - if l := len(seed); l != SeedSize { - panic("ed25519: bad seed length: " + strconv.Itoa(l)) - } - - digest := sha512.Sum512(seed) - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest[:]) - edwards25519.GeScalarMultBase(&A, &hBytes) - var publicKeyBytes [32]byte - A.ToBytes(&publicKeyBytes) - - privateKey := make([]byte, PrivateKeySize) - copy(privateKey, seed) - copy(privateKey[32:], publicKeyBytes[:]) - - return privateKey + return ed25519.NewKeyFromSeed(seed) } // Sign signs the message with privateKey and returns a signature. It will // panic if len(privateKey) is not PrivateKeySize. func Sign(privateKey PrivateKey, message []byte) []byte { - if l := len(privateKey); l != PrivateKeySize { - panic("ed25519: bad private key length: " + strconv.Itoa(l)) - } - - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := make([]byte, SignatureSize) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - - return signature + return 
ed25519.Sign(privateKey, message) } // Verify reports whether sig is a valid signature of message by publicKey. It // will panic if len(publicKey) is not PublicKeySize. func Verify(publicKey PublicKey, message, sig []byte) bool { - if l := len(publicKey); l != PublicKeySize { - panic("ed25519: bad public key length: " + strconv.Itoa(l)) - } - - if len(sig) != SignatureSize || sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - var publicKeyBytes [32]byte - copy(publicKeyBytes[:], publicKey) - if !A.FromBytes(&publicKeyBytes) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var s [32]byte - copy(s[:], sig[32:]) - - // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in - // the range [0, order) in order to prevent signature malleability. - if !edwards25519.ScMinimal(&s) { - return false - } - - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) - - var checkR [32]byte - R.ToBytes(&checkR) - return bytes.Equal(sig[:32], checkR[:]) + return ed25519.Verify(publicKey, message, sig) } diff --git a/mantle/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/mantle/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go deleted file mode 100644 index b5974dc8..00000000 --- a/mantle/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. 
-// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. 
RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go deleted file mode 100644 index e39f086c..00000000 --- a/mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go +++ /dev/null @@ -1,1422 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -// These values are from the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// d is a constant in the Edwards curve equation. -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -// d2 is 2*d. -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -// SqrtM1 is the square-root of -1 in the field. -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -// A is a constant in the Montgomery-form of curve25519. -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -// bi contains precomputed multiples of the base-point. 
See the Ed25519 paper -// for a discussion about how these values are used. -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, 
-31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
-var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 
15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { 
- FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 
1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 
19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 
6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 
6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, 
-9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 
14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - 
FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 
13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 
27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, 
-25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 
6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, 
-9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, 
-14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, 
- }, - { - FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, 
-4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, 
-11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 
7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - 
FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 
27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, 
-31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - 
{ - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, 
-1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, 
-3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, 
-15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 
10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - 
}, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 
4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, 
-13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, 
-15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 
20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, 
-6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, 
-15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 
3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 
15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - 
FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 
2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 
12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, - { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - 
FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 
24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, 
-16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, 
-14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, 
-8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 
8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git 
a/mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index fd03c252..00000000 --- a/mantle/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -import "encoding/binary" - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. 
-func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f 
*FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 
1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs, can squeeze carries into int32. 
func FeMul(h, f, g *FieldElement) {
	// Widen the ten 25.5-bit limbs of f and g to int64 for the schoolbook
	// product; the *2 and *19 factors below are the radix-2^25.5 and
	// 2^255=19 (mod p) precomputations described in the comment above.
	f0 := int64(f[0])
	f1 := int64(f[1])
	f2 := int64(f[2])
	f3 := int64(f[3])
	f4 := int64(f[4])
	f5 := int64(f[5])
	f6 := int64(f[6])
	f7 := int64(f[7])
	f8 := int64(f[8])
	f9 := int64(f[9])

	f1_2 := int64(2 * f[1])
	f3_2 := int64(2 * f[3])
	f5_2 := int64(2 * f[5])
	f7_2 := int64(2 * f[7])
	f9_2 := int64(2 * f[9])

	g0 := int64(g[0])
	g1 := int64(g[1])
	g2 := int64(g[2])
	g3 := int64(g[3])
	g4 := int64(g[4])
	g5 := int64(g[5])
	g6 := int64(g[6])
	g7 := int64(g[7])
	g8 := int64(g[8])
	g9 := int64(g[9])

	g1_19 := int64(19 * g[1]) /* 1.4*2^29 */
	g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */
	g3_19 := int64(19 * g[3])
	g4_19 := int64(19 * g[4])
	g5_19 := int64(19 * g[5])
	g6_19 := int64(19 * g[6])
	g7_19 := int64(19 * g[7])
	g8_19 := int64(19 * g[8])
	g9_19 := int64(19 * g[9])

	// h_k collects every f_i*g_j with i+j = k (mod 10); terms that wrap
	// past limb 9 pick up the *19 factor, odd*odd products the *2 factor.
	h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19
	h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19
	h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19
	h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19
	h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19
	h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19
	h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19
	h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19
	h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19
	h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0

	FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
}

// feSquare returns the ten wide limb sums of f*f (before carry reduction);
// FeSquare/FeSquare2 feed them through FeCombine.
func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
	f0 := int64(f[0])
	f1 := int64(f[1])
	f2 := int64(f[2])
	f3 := int64(f[3])
	f4 := int64(f[4])
	f5 := int64(f[5])
	f6 := int64(f[6])
	f7 := int64(f[7])
	f8 := int64(f[8])
	f9 := int64(f[9])
	f0_2 := int64(2 * f[0])
	f1_2 := int64(2 * f[1])
	f2_2 := int64(2 * f[2])
	f3_2 := int64(2 * f[3])
	f4_2 := int64(2 * f[4])
	f5_2 := int64(2 * f[5])
	f6_2 := int64(2 * f[6])
	f7_2 := int64(2 * f[7])
	f5_38 := 38 * f5 // 1.31*2^30
	f6_19 := 19 * f6 // 1.31*2^30
	f7_38 := 38 * f7 // 1.31*2^30
	f8_19 := 19 * f8 // 1.31*2^30
	f9_38 := 38 * f9 // 1.31*2^30

	// Squaring halves the multiplications of FeMul: cross terms f_i*f_j
	// (i != j) appear once with a doubled operand instead of twice.
	h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38
	h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19
	h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19
	h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38
	h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38
	h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19
	h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19
	h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38
	h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38
	h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5

	return
}

// FeSquare calculates h = f*f. Can overlap h with f.
//
// Preconditions:
//   |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
//
// Postconditions:
//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
func FeSquare(h, f *FieldElement) {
	h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
	FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
}

// FeSquare2 sets h = 2 * f * f
//
// Can overlap h with f.
//
// Preconditions:
//   |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
//
// Postconditions:
//   |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
// See fe_mul.c for discussion of implementation strategy.
-func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - 
FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. -// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p 
*ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p 
*CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, 
&r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. 
func negative(b int32) int32 {
	return (b >> 31) & 1
}

// PreComputedGroupElementCMove conditionally sets t to u when b == 1
// (b must be 0 or 1), in constant time via FeCMove on each coordinate.
func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) {
	FeCMove(&t.yPlusX, &u.yPlusX, b)
	FeCMove(&t.yMinusX, &u.yMinusX, b)
	FeCMove(&t.xy2d, &u.xy2d, b)
}

// selectPoint sets t = b * 16^(2*pos) * B from the precomputed table,
// for b in [-8,8], in constant time: every table entry is scanned with a
// conditional move, and negation is applied via a second conditional move.
func selectPoint(t *PreComputedGroupElement, pos int32, b int32) {
	var minusT PreComputedGroupElement
	bNegative := negative(b)
	bAbs := b - (((-bNegative) & b) << 1) // |b| without branching

	t.Zero()
	for i := int32(0); i < 8; i++ {
		PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1))
	}
	// minusT is the negation of t: swap y+x with y-x and negate xy2d.
	FeCopy(&minusT.yPlusX, &t.yMinusX)
	FeCopy(&minusT.yMinusX, &t.yPlusX)
	FeNeg(&minusT.xy2d, &t.xy2d)
	PreComputedGroupElementCMove(t, &minusT, bNegative)
}

// GeScalarMultBase computes h = a*B, where
//   a = a[0]+256*a[1]+...+256^31 a[31]
//   B is the Ed25519 base point (x,4/5) with x positive.
//
// Preconditions:
//   a[31] <= 127
func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) {
	var e [64]int8

	// Split a into 64 radix-16 digits.
	for i, v := range a {
		e[2*i] = int8(v & 15)
		e[2*i+1] = int8((v >> 4) & 15)
	}

	// each e[i] is between 0 and 15 and e[63] is between 0 and 7.

	// Recode to signed digits in [-8,8] so selectPoint can be used.
	carry := int8(0)
	for i := 0; i < 63; i++ {
		e[i] += carry
		carry = (e[i] + 8) >> 4
		e[i] -= carry << 4
	}
	e[63] += carry
	// each e[i] is between -8 and 8.

	// Accumulate the odd digits first...
	h.Zero()
	var t PreComputedGroupElement
	var r CompletedGroupElement
	for i := int32(1); i < 64; i += 2 {
		selectPoint(&t, i/2, int32(e[i]))
		geMixedAdd(&r, h, &t)
		r.ToExtended(h)
	}

	// ...then multiply by 16 (four doublings) and add the even digits.
	var s ProjectiveGroupElement

	h.Double(&r)
	r.ToProjective(&s)
	s.Double(&r)
	r.ToProjective(&s)
	s.Double(&r)
	r.ToProjective(&s)
	s.Double(&r)
	r.ToExtended(h)

	for i := int32(0); i < 64; i += 2 {
		selectPoint(&t, i/2, int32(e[i]))
		geMixedAdd(&r, h, &t)
		r.ToExtended(h)
	}
}

// The scalars are GF(2^252 + 27742317777372353535851937790883648493).
// ScMulAdd computes s = (a*b + c) mod l in the scalar field.
//
// Input:
//   a[0]+256*a[1]+...+256^31*a[31] = a
//   b[0]+256*b[1]+...+256^31*b[31] = b
//   c[0]+256*c[1]+...+256^31*c[31] = c
//
// Output:
//   s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
//   where l = 2^252 + 27742317777372353535851937790883648493.
func ScMulAdd(s, a, b, c *[32]byte) {
	// Unpack each scalar into twelve 21-bit limbs (2097151 = 2^21-1).
	a0 := 2097151 & load3(a[:])
	a1 := 2097151 & (load4(a[2:]) >> 5)
	a2 := 2097151 & (load3(a[5:]) >> 2)
	a3 := 2097151 & (load4(a[7:]) >> 7)
	a4 := 2097151 & (load4(a[10:]) >> 4)
	a5 := 2097151 & (load3(a[13:]) >> 1)
	a6 := 2097151 & (load4(a[15:]) >> 6)
	a7 := 2097151 & (load3(a[18:]) >> 3)
	a8 := 2097151 & load3(a[21:])
	a9 := 2097151 & (load4(a[23:]) >> 5)
	a10 := 2097151 & (load3(a[26:]) >> 2)
	a11 := (load4(a[28:]) >> 7)
	b0 := 2097151 & load3(b[:])
	b1 := 2097151 & (load4(b[2:]) >> 5)
	b2 := 2097151 & (load3(b[5:]) >> 2)
	b3 := 2097151 & (load4(b[7:]) >> 7)
	b4 := 2097151 & (load4(b[10:]) >> 4)
	b5 := 2097151 & (load3(b[13:]) >> 1)
	b6 := 2097151 & (load4(b[15:]) >> 6)
	b7 := 2097151 & (load3(b[18:]) >> 3)
	b8 := 2097151 & load3(b[21:])
	b9 := 2097151 & (load4(b[23:]) >> 5)
	b10 := 2097151 & (load3(b[26:]) >> 2)
	b11 := (load4(b[28:]) >> 7)
	c0 := 2097151 & load3(c[:])
	c1 := 2097151 & (load4(c[2:]) >> 5)
	c2 := 2097151 & (load3(c[5:]) >> 2)
	c3 := 2097151 & (load4(c[7:]) >> 7)
	c4 := 2097151 & (load4(c[10:]) >> 4)
	c5 := 2097151 & (load3(c[13:]) >> 1)
	c6 := 2097151 & (load4(c[15:]) >> 6)
	c7 := 2097151 & (load3(c[18:]) >> 3)
	c8 := 2097151 & load3(c[21:])
	c9 := 2097151 & (load4(c[23:]) >> 5)
	c10 := 2097151 & (load3(c[26:]) >> 2)
	c11 := (load4(c[28:]) >> 7)
	var carry [23]int64

	// Schoolbook product a*b plus c: s_k = c_k + sum of a_i*b_j, i+j=k.
	s0 := c0 + a0*b0
	s1 := c1 + a0*b1 + a1*b0
	s2 := c2 + a0*b2 + a1*b1 + a2*b0
	s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
	s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
	s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
	s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
	s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
	s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
	s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
	s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
	s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
	s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
	s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
	s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
	s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
	s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
	s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
	s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
	s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
	s20 := a9*b11 + a10*b10 + a11*b9
	s21 := a10*b11 + a11*b10
	s22 := a11 * b11
	s23 := int64(0)

	// First carry pass: even limbs, then odd limbs (rounded shifts).
	carry[0] = (s0 + (1 << 20)) >> 21
	s1 += carry[0]
	s0 -= carry[0] << 21
	carry[2] = (s2 + (1 << 20)) >> 21
	s3 += carry[2]
	s2 -= carry[2] << 21
	carry[4] = (s4 + (1 << 20)) >> 21
	s5 += carry[4]
	s4 -= carry[4] << 21
	carry[6] = (s6 + (1 << 20)) >> 21
	s7 += carry[6]
	s6 -= carry[6] << 21
	carry[8] = (s8 + (1 << 20)) >> 21
	s9 += carry[8]
	s8 -= carry[8] << 21
	carry[10] = (s10 + (1 << 20)) >> 21
	s11 += carry[10]
	s10 -= carry[10] << 21
	carry[12] = (s12 + (1 << 20)) >> 21
	s13 += carry[12]
	s12 -= carry[12] << 21
	carry[14] = (s14 + (1 << 20)) >> 21
	s15 += carry[14]
	s14 -= carry[14] << 21
	carry[16] = (s16 + (1 << 20)) >> 21
	s17 += carry[16]
	s16 -= carry[16] << 21
	carry[18] = (s18 + (1 << 20)) >> 21
	s19 += carry[18]
	s18 -= carry[18] << 21
	carry[20] = (s20 + (1 << 20)) >> 21
	s21 += carry[20]
	s20 -= carry[20] << 21
	carry[22] = (s22 + (1 << 20)) >> 21
	s23 += carry[22]
	s22 -= carry[22] << 21

	carry[1] = (s1 + (1 << 20)) >> 21
	s2 += carry[1]
	s1 -= carry[1] << 21
	carry[3] = (s3 + (1 << 20)) >> 21
	s4 += carry[3]
	s3 -= carry[3] << 21
	carry[5] = (s5 + (1 << 20)) >> 21
	s6 += carry[5]
	s5 -= carry[5] << 21
	carry[7] = (s7 + (1 << 20)) >> 21
	s8 += carry[7]
	s7 -= carry[7] << 21
	carry[9] = (s9 + (1 << 20)) >> 21
	s10 += carry[9]
	s9 -= carry[9] << 21
	carry[11] = (s11 + (1 << 20)) >> 21
	s12 += carry[11]
	s11 -= carry[11] << 21
	carry[13] = (s13 + (1 << 20)) >> 21
	s14 += carry[13]
	s13 -= carry[13] << 21
	carry[15] = (s15 + (1 << 20)) >> 21
	s16 += carry[15]
	s15 -= carry[15] << 21
	carry[17] = (s17 + (1 << 20)) >> 21
	s18 += carry[17]
	s17 -= carry[17] << 21
	carry[19] = (s19 + (1 << 20)) >> 21
	s20 += carry[19]
	s19 -= carry[19] << 21
	carry[21] = (s21 + (1 << 20)) >> 21
	s22 += carry[21]
	s21 -= carry[21] << 21

	// Fold the high limbs s23..s18 back using
	// 2^252 = -27742317777372353535851937790883648493 (mod l); the six
	// constants are the 21-bit limbs of that value.
	s11 += s23 * 666643
	s12 += s23 * 470296
	s13 += s23 * 654183
	s14 -= s23 * 997805
	s15 += s23 * 136657
	s16 -= s23 * 683901
	s23 = 0

	s10 += s22 * 666643
	s11 += s22 * 470296
	s12 += s22 * 654183
	s13 -= s22 * 997805
	s14 += s22 * 136657
	s15 -= s22 * 683901
	s22 = 0

	s9 += s21 * 666643
	s10 += s21 * 470296
	s11 += s21 * 654183
	s12 -= s21 * 997805
	s13 += s21 * 136657
	s14 -= s21 * 683901
	s21 = 0

	s8 += s20 * 666643
	s9 += s20 * 470296
	s10 += s20 * 654183
	s11 -= s20 * 997805
	s12 += s20 * 136657
	s13 -= s20 * 683901
	s20 = 0

	s7 += s19 * 666643
	s8 += s19 * 470296
	s9 += s19 * 654183
	s10 -= s19 * 997805
	s11 += s19 * 136657
	s12 -= s19 * 683901
	s19 = 0

	s6 += s18 * 666643
	s7 += s18 * 470296
	s8 += s18 * 654183
	s9 -= s18 * 997805
	s10 += s18 * 136657
	s11 -= s18 * 683901
	s18 = 0

	carry[6] = (s6 + (1 << 20)) >> 21
	s7 += carry[6]
	s6 -= carry[6] << 21
	carry[8] = (s8 + (1 << 20)) >> 21
	s9 += carry[8]
	s8 -= carry[8] << 21
	carry[10] = (s10 + (1 << 20)) >> 21
	s11 += carry[10]
	s10 -= carry[10] << 21
	carry[12] = (s12 + (1 << 20)) >> 21
	s13 += carry[12]
	s12 -= carry[12] << 21
	carry[14] = (s14 + (1 << 20)) >> 21
	s15 += carry[14]
	s14 -= carry[14] << 21
	carry[16] = (s16 + (1 << 20)) >> 21
	s17 += carry[16]
	s16 -= carry[16] << 21

	carry[7] = (s7 + (1 << 20)) >> 21
	s8 += carry[7]
	s7 -= carry[7] << 21
	carry[9] = (s9 + (1 << 20)) >> 21
	s10 += carry[9]
	s9 -= carry[9] << 21
	carry[11] = (s11 + (1 << 20)) >> 21
	s12 += carry[11]
	s11 -= carry[11] << 21
	carry[13] = (s13 + (1 << 20)) >> 21
	s14 += carry[13]
	s13 -= carry[13] << 21
	carry[15] = (s15 + (1 << 20)) >> 21
	s16 += carry[15]
	s15 -= carry[15] << 21

	// Fold s17..s12 back the same way.
	s5 += s17 * 666643
	s6 += s17 * 470296
	s7 += s17 * 654183
	s8 -= s17 * 997805
	s9 += s17 * 136657
	s10 -= s17 * 683901
	s17 = 0

	s4 += s16 * 666643
	s5 += s16 * 470296
	s6 += s16 * 654183
	s7 -= s16 * 997805
	s8 += s16 * 136657
	s9 -= s16 * 683901
	s16 = 0

	s3 += s15 * 666643
	s4 += s15 * 470296
	s5 += s15 * 654183
	s6 -= s15 * 997805
	s7 += s15 * 136657
	s8 -= s15 * 683901
	s15 = 0

	s2 += s14 * 666643
	s3 += s14 * 470296
	s4 += s14 * 654183
	s5 -= s14 * 997805
	s6 += s14 * 136657
	s7 -= s14 * 683901
	s14 = 0

	s1 += s13 * 666643
	s2 += s13 * 470296
	s3 += s13 * 654183
	s4 -= s13 * 997805
	s5 += s13 * 136657
	s6 -= s13 * 683901
	s13 = 0

	s0 += s12 * 666643
	s1 += s12 * 470296
	s2 += s12 * 654183
	s3 -= s12 * 997805
	s4 += s12 * 136657
	s5 -= s12 * 683901
	s12 = 0

	carry[0] = (s0 + (1 << 20)) >> 21
	s1 += carry[0]
	s0 -= carry[0] << 21
	carry[2] = (s2 + (1 << 20)) >> 21
	s3 += carry[2]
	s2 -= carry[2] << 21
	carry[4] = (s4 + (1 << 20)) >> 21
	s5 += carry[4]
	s4 -= carry[4] << 21
	carry[6] = (s6 + (1 << 20)) >> 21
	s7 += carry[6]
	s6 -= carry[6] << 21
	carry[8] = (s8 + (1 << 20)) >> 21
	s9 += carry[8]
	s8 -= carry[8] << 21
	carry[10] = (s10 + (1 << 20)) >> 21
	s11 += carry[10]
	s10 -= carry[10] << 21

	carry[1] = (s1 + (1 << 20)) >> 21
	s2 += carry[1]
	s1 -= carry[1] << 21
	carry[3] = (s3 + (1 << 20)) >> 21
	s4 += carry[3]
	s3 -= carry[3] << 21
	carry[5] = (s5 + (1 << 20)) >> 21
	s6 += carry[5]
	s5 -= carry[5] << 21
	carry[7] = (s7 + (1 << 20)) >> 21
	s8 += carry[7]
	s7 -= carry[7] << 21
	carry[9] = (s9 + (1 << 20)) >> 21
	s10 += carry[9]
	s9 -= carry[9] << 21
	carry[11] = (s11 + (1 << 20)) >> 21
	s12 += carry[11]
	s11 -= carry[11] << 21

	// Two final fold-and-carry rounds bring the result fully below l.
	s0 += s12 * 666643
	s1 += s12 * 470296
	s2 += s12 * 654183
	s3 -= s12 * 997805
	s4 += s12 * 136657
	s5 -= s12 * 683901
	s12 = 0

	carry[0] = s0 >> 21
	s1 += carry[0]
	s0 -= carry[0] << 21
	carry[1] = s1 >> 21
	s2 += carry[1]
	s1 -= carry[1] << 21
	carry[2] = s2 >> 21
	s3 += carry[2]
	s2 -= carry[2] << 21
	carry[3] = s3 >> 21
	s4 += carry[3]
	s3 -= carry[3] << 21
	carry[4] = s4 >> 21
	s5 += carry[4]
	s4 -= carry[4] << 21
	carry[5] = s5 >> 21
	s6 += carry[5]
	s5 -= carry[5] << 21
	carry[6] = s6 >> 21
	s7 += carry[6]
	s6 -= carry[6] << 21
	carry[7] = s7 >> 21
	s8 += carry[7]
	s7 -= carry[7] << 21
	carry[8] = s8 >> 21
	s9 += carry[8]
	s8 -= carry[8] << 21
	carry[9] = s9 >> 21
	s10 += carry[9]
	s9 -= carry[9] << 21
	carry[10] = s10 >> 21
	s11 += carry[10]
	s10 -= carry[10] << 21
	carry[11] = s11 >> 21
	s12 += carry[11]
	s11 -= carry[11] << 21

	s0 += s12 * 666643
	s1 += s12 * 470296
	s2 += s12 * 654183
	s3 -= s12 * 997805
	s4 += s12 * 136657
	s5 -= s12 * 683901
	s12 = 0

	carry[0] = s0 >> 21
	s1 += carry[0]
	s0 -= carry[0] << 21
	carry[1] = s1 >> 21
	s2 += carry[1]
	s1 -= carry[1] << 21
	carry[2] = s2 >> 21
	s3 += carry[2]
	s2 -= carry[2] << 21
	carry[3] = s3 >> 21
	s4 += carry[3]
	s3 -= carry[3] << 21
	carry[4] = s4 >> 21
	s5 += carry[4]
	s4 -= carry[4] << 21
	carry[5] = s5 >> 21
	s6 += carry[5]
	s5 -= carry[5] << 21
	carry[6] = s6 >> 21
	s7 += carry[6]
	s6 -= carry[6] << 21
	carry[7] = s7 >> 21
	s8 += carry[7]
	s7 -= carry[7] << 21
	carry[8] = s8 >> 21
	s9 += carry[8]
	s8 -= carry[8] << 21
	carry[9] = s9 >> 21
	s10 += carry[9]
	s9 -= carry[9] << 21
	carry[10] = s10 >> 21
	s11 += carry[10]
	s10 -= carry[10] << 21

	// Repack the twelve 21-bit limbs into 32 little-endian bytes.
	s[0] = byte(s0 >> 0)
	s[1] = byte(s0 >> 8)
	s[2] = byte((s0 >> 16) | (s1 << 5))
	s[3] = byte(s1 >> 3)
	s[4] = byte(s1 >> 11)
	s[5] = byte((s1 >> 19) | (s2 << 2))
	s[6] = byte(s2 >> 6)
	s[7] = byte((s2 >> 14) | (s3 << 7))
	s[8] = byte(s3 >> 1)
	s[9] = byte(s3 >> 9)
	s[10] = byte((s3 >> 17) | (s4 << 4))
	s[11] = byte(s4 >> 4)
	s[12] = byte(s4 >> 12)
	s[13] = byte((s4 >> 20) | (s5 << 1))
	s[14] = byte(s5 >> 7)
	s[15] = byte((s5 >> 15) | (s6 << 6))
	s[16] = byte(s6 >> 2)
	s[17] = byte(s6 >> 10)
	s[18] = byte((s6 >> 18) | (s7 << 3))
	s[19] = byte(s7 >> 5)
	s[20] = byte(s7 >> 13)
	s[21] = byte(s8 >> 0)
	s[22] = byte(s8 >> 8)
	s[23] = byte((s8 >> 16) | (s9 << 5))
	s[24] = byte(s9 >> 3)
	s[25] = byte(s9 >> 11)
	s[26] = byte((s9 >> 19) | (s10 << 2))
	s[27] = byte(s10 >> 6)
	s[28] = byte((s10 >> 14) | (s11 << 7))
	s[29] = byte(s11 >> 1)
	s[30] = byte(s11 >> 9)
	s[31] = byte(s11 >> 17)
}

// Input:
//   s[0]+256*s[1]+...+256^63*s[63] = s
//
// Output:
//   s[0]+256*s[1]+...+256^31*s[31] = s mod l
//   where l = 2^252 + 27742317777372353535851937790883648493.
-func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - 
carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - 
s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - 
s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} - -// order is the order of Curve25519 in little-endian form. -var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} - -// ScMinimal returns true if the given scalar is less than the order of the -// curve. -func ScMinimal(scalar *[32]byte) bool { - for i := 3; ; i-- { - v := binary.LittleEndian.Uint64(scalar[i*8:]) - if v > order[i] { - return false - } else if v < order[i] { - break - } else if i == 0 { - return false - } - } - - return true -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/mantle/vendor/golang.org/x/crypto/openpgp/armor/armor.go deleted file mode 100644 index ebc87876..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2010 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package armor // import "golang.org/x/crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. 
- Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. -type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if bytes.HasPrefix(line, armorEnd) { - l.eof = true - return 0, io.EOF - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - l.crcSet = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } 
- l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. -func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - 
p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/mantle/vendor/golang.org/x/crypto/openpgp/armor/encode.go deleted file mode 100644 index 6f07582c..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. 
-type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/mantle/vendor/golang.org/x/crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e389..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/mantle/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 84396a08..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -// -// Deprecated: this package was only provided to support ElGamal encryption in -// OpenPGP. 
The golang.org/x/crypto/openpgp package is now deprecated (see -// https://golang.org/issue/44226), and ElGamal in the OpenPGP ecosystem has -// compatibility and security issues (see https://eprint.iacr.org/2021/923). -// Moreover, this package doesn't protect against side-channel attacks. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. 
See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - if s.ModInverse(s, priv.P) == nil { - return nil, errors.New("elgamal: invalid private key") - } - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. -func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/mantle/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index 1d7a0ea0..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package errors contains common error types for the OpenPGP packages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. 
-type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/keys.go b/mantle/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index faa2fb36..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,693 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. 
-type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) " - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. 
-func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey return the best candidate Key for signing a message with this -// Entity. 
-func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. 
-func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. 
-func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. 
-func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. 
- case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. - return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. 
- identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - identity.SelfSignature = sig - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig - case packet.SigTypeSubkeyBinding: - - if shouldReplaceSubkeySig(subKey.Sig, sig) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if 
potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. -func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - - // If the user passes in a DefaultHash via packet.Config, - // set the 
PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - // Likewise for DefaultCipher. - if config != nil && config.DefaultCipher != 0 { - e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) - if err != nil { - return nil, err - } - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntry. -// If config is nil, sensible defaults will be used. 
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. 
-func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/compressed.go deleted file mode 100644 index e8f0b5ca..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "golang.org/x/crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. -type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. 
It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more then 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. - Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriterCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. 
-func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/config.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc9..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values. -type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. 
- Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. 
- RSABits int -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 6d763972..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. 
-type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - if err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. 
- switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - // Supports both *rsa.PrivateKey and crypto.Decrypter - k := priv.PrivateKey.(crypto.Decrypter) - b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.bytes), nil) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - default: - err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. 
-func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. 
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen 
:= 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/literal.go deleted file mode 100644 index 1a9ec6e5..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. -func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.IsBinary = buf[0] == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. 
The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go deleted file mode 100644 index ce2a33a5..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. 
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. 
-func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index 17135033..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/opaque.go deleted file mode 100644 index 456d807f..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "bytes" - "io" - "io/ioutil" - - "golang.org/x/crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. -type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. 
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - 
return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 0a19794a..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rsa" - "io" - "math/big" - "math/bits" - - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
-func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte - sentFirst bool - buf []byte -} - -// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. -const minFirstPartialWrite = 512 - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - off := 0 - if !w.sentFirst { - if len(w.buf) > 0 || len(p) < minFirstPartialWrite { - off = len(w.buf) - w.buf = append(w.buf, p...) 
- if len(w.buf) < minFirstPartialWrite { - return len(p), nil - } - p = w.buf - w.buf = nil - } - w.sentFirst = true - } - - power := uint8(30) - for len(p) > 0 { - l := 1 << power - if len(p) < l { - power = uint8(bits.Len32(uint32(len(p)))) - 1 - l = 1 << power - } - w.lengthByte[0] = 224 + power - _, err = w.w.Write(w.lengthByte[:]) - if err == nil { - var m int - m, err = w.w.Write(p[:l]) - n += m - } - if err != nil { - if n < off { - return 0, err - } - return n - off, err - } - p = p[l:] - } - return n - off, nil -} - -func (w *partialLengthWriter) Close() error { - if len(w.buf) > 0 { - // In this case we can't send a 512 byte packet. - // Just send what we have. - p := w.buf - w.sentFirst = true - w.buf = nil - if _, err := w.Write(p); err != nil { - return err - } - } - - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. 
-func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. 
-func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an 
OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. -func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. 
See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - // According to RFC 4880 3.2. we should check that the MPI has no leading - // zeroes (at least when not an encrypted MPI?), but this implementation - // does generate leading zeroes, so we keep accepting them. 
- return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. - // Implementations seem to be tolerant of them, and stripping them would - // make it complex to guarantee matching re-serialization. - _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. -type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 81abb7ce..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "io/ioutil" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or crypto.Signer/crypto.Decrypter (Decryptor RSA only). - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - // In general, the public Keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility. 
- switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = ioutil.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch 
priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. 
-func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - 
rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = &ecdsa.PrivateKey{ - PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index fcd5f525..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. 
-func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. 
-func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. -func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - - // The bit length is 3 (for the 0x04 specifying an uncompressed key) - // plus two field elements (for x and y), which are rounded up to the - // nearest byte. 
See https://tools.ietf.org/html/rfc6637#section-6 - fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 - pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
-func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. 
-// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in 
the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
-func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. 
-func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). 
-func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6c..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. 
-// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
-func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
-func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. 
- panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. 
-func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c61..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. 
-func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. -func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index b2a24a53..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. 
- KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - 
sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. -func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. 
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = 
errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if 
len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. 
-func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. 
-func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. 
- subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. - - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { 
- subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff889..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. 
-type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. - if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. 
- if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 744c2d2c..00000000 --- 
a/mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. -type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. 
- n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. 
-func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. - err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 6126030e..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" - "hash" - "io" - "strconv" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. 
-func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. - ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. 
-type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. 
-const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. -type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. 
-func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go deleted file mode 100644 index d19ffbc7..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" - "io/ioutil" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. 
-func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. - 0, 0, 0, 0, - 0, 0, 0, 0} - if _, err = buf.Write(data); err != nil { - return - } - if err = jpeg.Encode(&buf, photo, nil); err != nil { - return - } - uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - Contents: buf.Bytes()}) - } - return -} - -// NewUserAttribute creates a new user attribute packet containing the given subpackets. -func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { - return &UserAttribute{Contents: contents} -} - -func (uat *UserAttribute) parse(r io.Reader) (err error) { - // RFC 4880, section 5.13 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uat.Contents, err = OpaqueSubpackets(b) - return -} - -// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including -// header. -func (uat *UserAttribute) Serialize(w io.Writer) (err error) { - var buf bytes.Buffer - for _, sp := range uat.Contents { - sp.Serialize(&buf) - } - if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { - return err - } - _, err = w.Write(buf.Bytes()) - return -} - -// ImageData returns zero or more byte slices, each containing -// JPEG File Interchange Format (JFIF), for each photo in the -// user attribute packet. 
-func (uat *UserAttribute) ImageData() (imageData [][]byte) { - for _, sp := range uat.Contents { - if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { - imageData = append(imageData, sp.Contents[16:]) - } - } - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/mantle/vendor/golang.org/x/crypto/openpgp/packet/userid.go deleted file mode 100644 index d6bea7d4..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/packet/userid.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - "io/ioutil" - "strings" -) - -// UserId contains text that is intended to represent the name and email -// address of the key holder. See RFC 4880, section 5.11. By convention, this -// takes the form "Full Name (Comment) " -type UserId struct { - Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. - - Name, Comment, Email string -} - -func hasInvalidCharacters(s string) bool { - for _, c := range s { - switch c { - case '(', ')', '<', '>', 0: - return true - } - } - return false -} - -// NewUserId returns a UserId or nil if any of the arguments contain invalid -// characters. The invalid characters are '\x00', '(', ')', '<' and '>' -func NewUserId(name, comment, email string) *UserId { - // RFC 4880 doesn't deal with the structure of userid strings; the - // name, comment and email form is just a convention. However, there's - // no convention about escaping the metacharacters and GPG just refuses - // to create user ids where, say, the name contains a '('. We mirror - // this behaviour. 
- - if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { - return nil - } - - uid := new(UserId) - uid.Name, uid.Comment, uid.Email = name, comment, email - uid.Id = name - if len(comment) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "(" - uid.Id += comment - uid.Id += ")" - } - if len(email) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "<" - uid.Id += email - uid.Id += ">" - } - return uid -} - -func (uid *UserId) parse(r io.Reader) (err error) { - // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uid.Id = string(b) - uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) - return -} - -// Serialize marshals uid to w in the form of an OpenPGP packet, including -// header. -func (uid *UserId) Serialize(w io.Writer) error { - err := serializeHeader(w, packetTypeUserId, len(uid.Id)) - if err != nil { - return err - } - _, err = w.Write([]byte(uid.Id)) - return err -} - -// parseUserId extracts the name, comment and email from a user id string that -// is formatted as "Full Name (Comment) ". 
-func parseUserId(id string) (name, comment, email string) { - var n, c, e struct { - start, end int - } - var state int - - for offset, rune := range id { - switch state { - case 0: - // Entering name - n.start = offset - state = 1 - fallthrough - case 1: - // In name - if rune == '(' { - state = 2 - n.end = offset - } else if rune == '<' { - state = 5 - n.end = offset - } - case 2: - // Entering comment - c.start = offset - state = 3 - fallthrough - case 3: - // In comment - if rune == ')' { - state = 4 - c.end = offset - } - case 4: - // Between comment and email - if rune == '<' { - state = 5 - } - case 5: - // Entering email - e.start = offset - state = 6 - fallthrough - case 6: - // In email - if rune == '>' { - state = 7 - e.end = offset - } - default: - // After email - } - } - switch state { - case 1: - // ended in the name - n.end = len(id) - case 3: - // ended in comment - c.end = len(id) - case 6: - // ended in email - e.end = len(id) - } - - name = strings.TrimSpace(id[n.start:n.end]) - comment = strings.TrimSpace(id[c.start:c.end]) - email = strings.TrimSpace(id[e.start:e.end]) - return -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/read.go b/mantle/vendor/golang.org/x/crypto/openpgp/read.go deleted file mode 100644 index 48a89314..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/read.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. 
-package openpgp // import "golang.org/x/crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. -func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. -type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. 
(An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. 
This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. - md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. 
-FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. 
-func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. 
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. 
-func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/mantle/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index 9de04958..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. -// -// Deprecated: this package is unmaintained except for security fixes. New -// applications should consider a more focused, modern alternative to OpenPGP -// for their specific task. If you are required to interoperate with OpenPGP -// systems and need a maintained package, consider a community fork. -// See https://golang.org/issue/44226. -package s2k // import "golang.org/x/crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "golang.org/x/crypto/openpgp/errors" -) - -// Config collects configuration parameters for s2k key-stretching -// transformatioms. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // Hash is the default hash function to be used. If - // nil, SHA1 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. 
Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - // SHA1 is the historical default in this package. - return crypto.SHA1 - } - - return c.Hash -} - -func (c *Config) encodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 96 // The common case. Correspoding to 65536 - } - - i := c.S2KCount - switch { - // Behave like GPG. Should we make 65536 the lowest value used? - case i < 1024: - i = 1024 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.7.1. -func encodeCount(i int) uint8 { - if i < 1024 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 0; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. 
-func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. -func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. -func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. 
-func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. 
-var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. -func HashIdToString(id byte) (name string, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.name, true - } - } - - return "", false -} - -// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for _, m := range hashToHashIdMapping { - if m.hash == h { - return m.id, true - } - } - return 0, false -} diff --git a/mantle/vendor/golang.org/x/crypto/openpgp/write.go b/mantle/vendor/golang.org/x/crypto/openpgp/write.go deleted file mode 100644 index 4ee71784..00000000 --- a/mantle/vendor/golang.org/x/crypto/openpgp/write.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" - "golang.org/x/crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. 
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. 
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. 
- ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. -func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. 
The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. 
- w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. 
- defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - return writeAndSign(payload, candidateHashes, signed, hints, config) -} - -// Sign signs a message. 
The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - defaultHashes := candidateHashes[len(candidateHashes)-1:] - preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. 
-type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/mantle/vendor/golang.org/x/crypto/pkcs12/bmp-string.go deleted file mode 100644 index 233b8b62..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/bmp-string.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "errors" - "unicode/utf16" -) - -// bmpString returns s encoded in UCS-2 with a zero terminator. 
-func bmpString(s string) ([]byte, error) { - // References: - // https://tools.ietf.org/html/rfc7292#appendix-B.1 - // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane - // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes - // EncodeRune returns 0xfffd if the rune does not need special encoding - // - the above RFC provides the info that BMPStrings are NULL terminated. - - ret := make([]byte, 0, 2*len(s)+2) - - for _, r := range s { - if t, _ := utf16.EncodeRune(r); t != 0xfffd { - return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") - } - ret = append(ret, byte(r/256), byte(r%256)) - } - - return append(ret, 0, 0), nil -} - -func decodeBMPString(bmpString []byte) (string, error) { - if len(bmpString)%2 != 0 { - return "", errors.New("pkcs12: odd-length BMP string") - } - - // strip terminator if present - if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { - bmpString = bmpString[:l-2] - } - - s := make([]uint16, 0, len(bmpString)/2) - for len(bmpString) > 0 { - s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) - bmpString = bmpString[2:] - } - - return string(utf16.Decode(s)), nil -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/crypto.go b/mantle/vendor/golang.org/x/crypto/pkcs12/crypto.go deleted file mode 100644 index 484ca51b..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pkcs12 - -import ( - "bytes" - "crypto/cipher" - "crypto/des" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - - "golang.org/x/crypto/pkcs12/internal/rc2" -) - -var ( - oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) - oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) -) - -// pbeCipher is an abstraction of a PKCS#12 cipher. -type pbeCipher interface { - // create returns a cipher.Block given a key. - create(key []byte) (cipher.Block, error) - // deriveKey returns a key derived from the given password and salt. - deriveKey(salt, password []byte, iterations int) []byte - // deriveKey returns an IV derived from the given password and salt. - deriveIV(salt, password []byte, iterations int) []byte -} - -type shaWithTripleDESCBC struct{} - -func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { - return des.NewTripleDESCipher(key) -} - -func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) -} - -func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) -} - -type shaWith40BitRC2CBC struct{} - -func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { - return rc2.New(key, len(key)*8) -} - -func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) -} - -func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { - return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) -} - -type pbeParams struct { - Salt []byte - Iterations int -} - -func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { - var cipherType pbeCipher - - switch { - case 
algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): - cipherType = shaWithTripleDESCBC{} - case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): - cipherType = shaWith40BitRC2CBC{} - default: - return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") - } - - var params pbeParams - if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { - return nil, 0, err - } - - key := cipherType.deriveKey(params.Salt, password, params.Iterations) - iv := cipherType.deriveIV(params.Salt, password, params.Iterations) - - block, err := cipherType.create(key) - if err != nil { - return nil, 0, err - } - - return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil -} - -func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { - cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) - if err != nil { - return nil, err - } - - encrypted := info.Data() - if len(encrypted) == 0 { - return nil, errors.New("pkcs12: empty encrypted data") - } - if len(encrypted)%blockSize != 0 { - return nil, errors.New("pkcs12: input is not a multiple of the block size") - } - decrypted = make([]byte, len(encrypted)) - cbc.CryptBlocks(decrypted, encrypted) - - psLen := int(decrypted[len(decrypted)-1]) - if psLen == 0 || psLen > blockSize { - return nil, ErrDecryption - } - - if len(decrypted) < psLen { - return nil, ErrDecryption - } - ps := decrypted[len(decrypted)-psLen:] - decrypted = decrypted[:len(decrypted)-psLen] - if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { - return nil, ErrDecryption - } - - return -} - -// decryptable abstracts an object that contains ciphertext. 
-type decryptable interface { - Algorithm() pkix.AlgorithmIdentifier - Data() []byte -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/errors.go b/mantle/vendor/golang.org/x/crypto/pkcs12/errors.go deleted file mode 100644 index 7377ce6f..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import "errors" - -var ( - // ErrDecryption represents a failure to decrypt the input. - ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") - - // ErrIncorrectPassword is returned when an incorrect password is detected. - // Usually, P12/PFX data is signed to be able to verify the password. - ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") -) - -// NotImplementedError indicates that the input is not currently supported. -type NotImplementedError string - -func (e NotImplementedError) Error() string { - return "pkcs12: " + string(e) -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/mantle/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go deleted file mode 100644 index 7499e3fb..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rc2 implements the RC2 cipher -/* -https://www.ietf.org/rfc/rfc2268.txt -http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf - -This code is licensed under the MIT license. 
-*/ -package rc2 - -import ( - "crypto/cipher" - "encoding/binary" -) - -// The rc2 block size in bytes -const BlockSize = 8 - -type rc2Cipher struct { - k [64]uint16 -} - -// New returns a new rc2 cipher with the given key and effective key length t1 -func New(key []byte, t1 int) (cipher.Block, error) { - // TODO(dgryski): error checking for key length - return &rc2Cipher{ - k: expandKey(key, t1), - }, nil -} - -func (*rc2Cipher) BlockSize() int { return BlockSize } - -var piTable = [256]byte{ - 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, - 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, - 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, - 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, - 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, - 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, - 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, - 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, - 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, - 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, - 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, - 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, - 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, - 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, - 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, - 0xc5, 0xf3, 0xdb, 0x47, 
0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, -} - -func expandKey(key []byte, t1 int) [64]uint16 { - - l := make([]byte, 128) - copy(l, key) - - var t = len(key) - var t8 = (t1 + 7) / 8 - var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) - - for i := len(key); i < 128; i++ { - l[i] = piTable[l[i-1]+l[uint8(i-t)]] - } - - l[128-t8] = piTable[l[128-t8]&tm] - - for i := 127 - t8; i >= 0; i-- { - l[i] = piTable[l[i+1]^l[i+t8]] - } - - var k [64]uint16 - - for i := range k { - k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 - } - - return k -} - -func rotl16(x uint16, b uint) uint16 { - return (x >> (16 - b)) | (x << b) -} - -func (c *rc2Cipher) Encrypt(dst, src []byte) { - - r0 := binary.LittleEndian.Uint16(src[0:]) - r1 := binary.LittleEndian.Uint16(src[2:]) - r2 := binary.LittleEndian.Uint16(src[4:]) - r3 := binary.LittleEndian.Uint16(src[6:]) - - var j int - - for j <= 16 { - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - - } - - r0 = r0 + c.k[r3&63] - r1 = r1 + c.k[r0&63] - r2 = r2 + c.k[r1&63] - r3 = r3 + c.k[r2&63] - - for j <= 40 { - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - - } - - r0 = r0 + c.k[r3&63] - r1 = r1 + c.k[r0&63] - r2 = r2 + c.k[r1&63] - r3 = r3 + c.k[r2&63] - - for j <= 60 { - // mix r0 - r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) - r0 = rotl16(r0, 1) - j++ - - // mix r1 - r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) - r1 = 
rotl16(r1, 2) - j++ - - // mix r2 - r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) - r2 = rotl16(r2, 3) - j++ - - // mix r3 - r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) - r3 = rotl16(r3, 5) - j++ - } - - binary.LittleEndian.PutUint16(dst[0:], r0) - binary.LittleEndian.PutUint16(dst[2:], r1) - binary.LittleEndian.PutUint16(dst[4:], r2) - binary.LittleEndian.PutUint16(dst[6:], r3) -} - -func (c *rc2Cipher) Decrypt(dst, src []byte) { - - r0 := binary.LittleEndian.Uint16(src[0:]) - r1 := binary.LittleEndian.Uint16(src[2:]) - r2 := binary.LittleEndian.Uint16(src[4:]) - r3 := binary.LittleEndian.Uint16(src[6:]) - - j := 63 - - for j >= 44 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - } - - r3 = r3 - c.k[r2&63] - r2 = r2 - c.k[r1&63] - r1 = r1 - c.k[r0&63] - r0 = r0 - c.k[r3&63] - - for j >= 20 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - - } - - r3 = r3 - c.k[r2&63] - r2 = r2 - c.k[r1&63] - r1 = r1 - c.k[r0&63] - r0 = r0 - c.k[r3&63] - - for j >= 0 { - // unmix r3 - r3 = rotl16(r3, 16-5) - r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) - j-- - - // unmix r2 - r2 = rotl16(r2, 16-3) - r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) - j-- - - // unmix r1 - r1 = rotl16(r1, 16-2) - r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) - j-- - - // unmix r0 - r0 = rotl16(r0, 16-1) - r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) - j-- - - } - - 
binary.LittleEndian.PutUint16(dst[0:], r0) - binary.LittleEndian.PutUint16(dst[2:], r1) - binary.LittleEndian.PutUint16(dst[4:], r2) - binary.LittleEndian.PutUint16(dst[6:], r3) -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/mac.go b/mantle/vendor/golang.org/x/crypto/pkcs12/mac.go deleted file mode 100644 index 5f38aa7d..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/mac.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/x509/pkix" - "encoding/asn1" -) - -type macData struct { - Mac digestInfo - MacSalt []byte - Iterations int `asn1:"optional,default:1"` -} - -// from PKCS#7: -type digestInfo struct { - Algorithm pkix.AlgorithmIdentifier - Digest []byte -} - -var ( - oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) -) - -func verifyMac(macData *macData, message, password []byte) error { - if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { - return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) - } - - key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) - - mac := hmac.New(sha1.New, key) - mac.Write(message) - expectedMAC := mac.Sum(nil) - - if !hmac.Equal(macData.Mac.Digest, expectedMAC) { - return ErrIncorrectPassword - } - return nil -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/mantle/vendor/golang.org/x/crypto/pkcs12/pbkdf.go deleted file mode 100644 index 5c419d41..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/pbkdf.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pkcs12 - -import ( - "bytes" - "crypto/sha1" - "math/big" -) - -var ( - one = big.NewInt(1) -) - -// sha1Sum returns the SHA-1 hash of in. -func sha1Sum(in []byte) []byte { - sum := sha1.Sum(in) - return sum[:] -} - -// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of -// repeats of pattern. -func fillWithRepeats(pattern []byte, v int) []byte { - if len(pattern) == 0 { - return nil - } - outputLen := v * ((len(pattern) + v - 1) / v) - return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] -} - -func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { - // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments - - // Let H be a hash function built around a compression function f: - - // Z_2^u x Z_2^v -> Z_2^u - - // (that is, H has a chaining variable and output of length u bits, and - // the message input to the compression function of H is v bits). The - // values for u and v are as follows: - - // HASH FUNCTION VALUE u VALUE v - // MD2, MD5 128 512 - // SHA-1 160 512 - // SHA-224 224 512 - // SHA-256 256 512 - // SHA-384 384 1024 - // SHA-512 512 1024 - // SHA-512/224 224 1024 - // SHA-512/256 256 1024 - - // Furthermore, let r be the iteration count. - - // We assume here that u and v are both multiples of 8, as are the - // lengths of the password and salt strings (which we denote by p and s, - // respectively) and the number n of pseudorandom bits required. In - // addition, u and v are of course non-zero. - - // For information on security considerations for MD5 [19], see [25] and - // [1], and on those for MD2, see [18]. - - // The following procedure can be used to produce pseudorandom bits for - // a particular "purpose" that is identified by a byte called "ID". - // This standard specifies 3 different values for the ID byte: - - // 1. 
If ID=1, then the pseudorandom bits being produced are to be used - // as key material for performing encryption or decryption. - - // 2. If ID=2, then the pseudorandom bits being produced are to be used - // as an IV (Initial Value) for encryption or decryption. - - // 3. If ID=3, then the pseudorandom bits being produced are to be used - // as an integrity key for MACing. - - // 1. Construct a string, D (the "diversifier"), by concatenating v/8 - // copies of ID. - var D []byte - for i := 0; i < v; i++ { - D = append(D, ID) - } - - // 2. Concatenate copies of the salt together to create a string S of - // length v(ceiling(s/v)) bits (the final copy of the salt may be - // truncated to create S). Note that if the salt is the empty - // string, then so is S. - - S := fillWithRepeats(salt, v) - - // 3. Concatenate copies of the password together to create a string P - // of length v(ceiling(p/v)) bits (the final copy of the password - // may be truncated to create P). Note that if the password is the - // empty string, then so is P. - - P := fillWithRepeats(password, v) - - // 4. Set I=S||P to be the concatenation of S and P. - I := append(S, P...) - - // 5. Set c=ceiling(n/u). - c := (size + u - 1) / u - - // 6. For i=1, 2, ..., c, do the following: - A := make([]byte, c*20) - var IjBuf []byte - for i := 0; i < c; i++ { - // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, - // H(H(H(... H(D||I)))) - Ai := hash(append(D, I...)) - for j := 1; j < r; j++ { - Ai = hash(Ai) - } - copy(A[i*20:], Ai[:]) - - if i < c-1 { // skip on last iteration - // B. Concatenate copies of Ai to create a string B of length v - // bits (the final copy of Ai may be truncated to create B). - var B []byte - for len(B) < v { - B = append(B, Ai[:]...) - } - B = B[:v] - - // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit - // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by - // setting I_j=(I_j+B+1) mod 2^v for each j. 
- { - Bbi := new(big.Int).SetBytes(B) - Ij := new(big.Int) - - for j := 0; j < len(I)/v; j++ { - Ij.SetBytes(I[j*v : (j+1)*v]) - Ij.Add(Ij, Bbi) - Ij.Add(Ij, one) - Ijb := Ij.Bytes() - // We expect Ijb to be exactly v bytes, - // if it is longer or shorter we must - // adjust it accordingly. - if len(Ijb) > v { - Ijb = Ijb[len(Ijb)-v:] - } - if len(Ijb) < v { - if IjBuf == nil { - IjBuf = make([]byte, v) - } - bytesShort := v - len(Ijb) - for i := 0; i < bytesShort; i++ { - IjBuf[i] = 0 - } - copy(IjBuf[bytesShort:], Ijb) - Ijb = IjBuf - } - copy(I[j*v:(j+1)*v], Ijb) - } - } - } - } - // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom - // bit string, A. - - // 8. Use the first n bits of A as the output of this entire process. - return A[:size] - - // If the above process is being used to generate a DES key, the process - // should be used to create 64 random bits, and the key's parity bits - // should be set after the 64 bits have been produced. Similar concerns - // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any - // similar keys with parity bits "built into them". -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/mantle/vendor/golang.org/x/crypto/pkcs12/pkcs12.go deleted file mode 100644 index 3a89bdb3..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/pkcs12.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pkcs12 implements some of PKCS#12. -// -// This implementation is distilled from https://tools.ietf.org/html/rfc7292 -// and referenced documents. It is intended for decoding P12/PFX-stored -// certificates and keys for use with the crypto/tls package. -// -// This package is frozen. If it's missing functionality you need, consider -// an alternative like software.sslmate.com/src/go-pkcs12. 
-package pkcs12 - -import ( - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/hex" - "encoding/pem" - "errors" -) - -var ( - oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) - oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) - - oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) - oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) - oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) - - errUnknownAttributeOID = errors.New("pkcs12: unknown attribute OID") -) - -type pfxPdu struct { - Version int - AuthSafe contentInfo - MacData macData `asn1:"optional"` -} - -type contentInfo struct { - ContentType asn1.ObjectIdentifier - Content asn1.RawValue `asn1:"tag:0,explicit,optional"` -} - -type encryptedData struct { - Version int - EncryptedContentInfo encryptedContentInfo -} - -type encryptedContentInfo struct { - ContentType asn1.ObjectIdentifier - ContentEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedContent []byte `asn1:"tag:0,optional"` -} - -func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { - return i.ContentEncryptionAlgorithm -} - -func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } - -type safeBag struct { - Id asn1.ObjectIdentifier - Value asn1.RawValue `asn1:"tag:0,explicit"` - Attributes []pkcs12Attribute `asn1:"set,optional"` -} - -type pkcs12Attribute struct { - Id asn1.ObjectIdentifier - Value asn1.RawValue `asn1:"set"` -} - -type encryptedPrivateKeyInfo struct { - AlgorithmIdentifier pkix.AlgorithmIdentifier - EncryptedData []byte -} - -func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { - return i.AlgorithmIdentifier -} - -func (i encryptedPrivateKeyInfo) Data() []byte { - return i.EncryptedData -} - -// PEM block types -const ( - certificateType = "CERTIFICATE" - privateKeyType = "PRIVATE KEY" -) 
- -// unmarshal calls asn1.Unmarshal, but also returns an error if there is any -// trailing data after unmarshaling. -func unmarshal(in []byte, out interface{}) error { - trailing, err := asn1.Unmarshal(in, out) - if err != nil { - return err - } - if len(trailing) != 0 { - return errors.New("pkcs12: trailing data found") - } - return nil -} - -// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. -// Unknown attributes are discarded. -// -// Note that although the returned PEM blocks for private keys have type -// "PRIVATE KEY", the bytes are not encoded according to PKCS #8, but according -// to PKCS #1 for RSA keys and SEC 1 for ECDSA keys. -func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { - encodedPassword, err := bmpString(password) - if err != nil { - return nil, ErrIncorrectPassword - } - - bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) - - if err != nil { - return nil, err - } - - blocks := make([]*pem.Block, 0, len(bags)) - for _, bag := range bags { - block, err := convertBag(&bag, encodedPassword) - if err != nil { - return nil, err - } - blocks = append(blocks, block) - } - - return blocks, nil -} - -func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { - block := &pem.Block{ - Headers: make(map[string]string), - } - - for _, attribute := range bag.Attributes { - k, v, err := convertAttribute(&attribute) - if err == errUnknownAttributeOID { - continue - } - if err != nil { - return nil, err - } - block.Headers[k] = v - } - - switch { - case bag.Id.Equal(oidCertBag): - block.Type = certificateType - certsData, err := decodeCertBag(bag.Value.Bytes) - if err != nil { - return nil, err - } - block.Bytes = certsData - case bag.Id.Equal(oidPKCS8ShroundedKeyBag): - block.Type = privateKeyType - - key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) - if err != nil { - return nil, err - } - - switch key := key.(type) { - case *rsa.PrivateKey: - block.Bytes = 
x509.MarshalPKCS1PrivateKey(key) - case *ecdsa.PrivateKey: - block.Bytes, err = x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - default: - return nil, errors.New("found unknown private key type in PKCS#8 wrapping") - } - default: - return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) - } - return block, nil -} - -func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { - isString := false - - switch { - case attribute.Id.Equal(oidFriendlyName): - key = "friendlyName" - isString = true - case attribute.Id.Equal(oidLocalKeyID): - key = "localKeyId" - case attribute.Id.Equal(oidMicrosoftCSPName): - // This key is chosen to match OpenSSL. - key = "Microsoft CSP Name" - isString = true - default: - return "", "", errUnknownAttributeOID - } - - if isString { - if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { - return "", "", err - } - if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { - return "", "", err - } - } else { - var id []byte - if err := unmarshal(attribute.Value.Bytes, &id); err != nil { - return "", "", err - } - value = hex.EncodeToString(id) - } - - return key, value, nil -} - -// Decode extracts a certificate and private key from pfxData. This function -// assumes that there is only one certificate and only one private key in the -// pfxData; if there are more use ToPEM instead. 
-func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { - encodedPassword, err := bmpString(password) - if err != nil { - return nil, nil, err - } - - bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) - if err != nil { - return nil, nil, err - } - - if len(bags) != 2 { - err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") - return - } - - for _, bag := range bags { - switch { - case bag.Id.Equal(oidCertBag): - if certificate != nil { - err = errors.New("pkcs12: expected exactly one certificate bag") - } - - certsData, err := decodeCertBag(bag.Value.Bytes) - if err != nil { - return nil, nil, err - } - certs, err := x509.ParseCertificates(certsData) - if err != nil { - return nil, nil, err - } - if len(certs) != 1 { - err = errors.New("pkcs12: expected exactly one certificate in the certBag") - return nil, nil, err - } - certificate = certs[0] - - case bag.Id.Equal(oidPKCS8ShroundedKeyBag): - if privateKey != nil { - err = errors.New("pkcs12: expected exactly one key bag") - return nil, nil, err - } - - if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { - return nil, nil, err - } - } - } - - if certificate == nil { - return nil, nil, errors.New("pkcs12: certificate missing") - } - if privateKey == nil { - return nil, nil, errors.New("pkcs12: private key missing") - } - - return -} - -func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { - pfx := new(pfxPdu) - if err := unmarshal(p12Data, pfx); err != nil { - return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) - } - - if pfx.Version != 3 { - return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") - } - - if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { - return nil, nil, NotImplementedError("only password-protected PFX is implemented") - } - - // unmarshal the explicit bytes in 
the content for type 'data' - if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { - return nil, nil, err - } - - if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { - return nil, nil, errors.New("pkcs12: no MAC in data") - } - - if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { - if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { - // some implementations use an empty byte array - // for the empty string password try one more - // time with empty-empty password - password = nil - err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) - } - if err != nil { - return nil, nil, err - } - } - - var authenticatedSafe []contentInfo - if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { - return nil, nil, err - } - - if len(authenticatedSafe) != 2 { - return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") - } - - for _, ci := range authenticatedSafe { - var data []byte - - switch { - case ci.ContentType.Equal(oidDataContentType): - if err := unmarshal(ci.Content.Bytes, &data); err != nil { - return nil, nil, err - } - case ci.ContentType.Equal(oidEncryptedDataContentType): - var encryptedData encryptedData - if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { - return nil, nil, err - } - if encryptedData.Version != 0 { - return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") - } - if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { - return nil, nil, err - } - default: - return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") - } - - var safeContents []safeBag - if err := unmarshal(data, &safeContents); err != nil { - return nil, nil, err - } - bags = append(bags, safeContents...) 
- } - - return bags, password, nil -} diff --git a/mantle/vendor/golang.org/x/crypto/pkcs12/safebags.go b/mantle/vendor/golang.org/x/crypto/pkcs12/safebags.go deleted file mode 100644 index def1f7b9..00000000 --- a/mantle/vendor/golang.org/x/crypto/pkcs12/safebags.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pkcs12 - -import ( - "crypto/x509" - "encoding/asn1" - "errors" -) - -var ( - // see https://tools.ietf.org/html/rfc7292#appendix-D - oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) - oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) - oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) -) - -type certBag struct { - Id asn1.ObjectIdentifier - Data []byte `asn1:"tag:0,explicit"` -} - -func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { - pkinfo := new(encryptedPrivateKeyInfo) - if err = unmarshal(asn1Data, pkinfo); err != nil { - return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) - } - - pkData, err := pbDecrypt(pkinfo, password) - if err != nil { - return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) - } - - ret := new(asn1.RawValue) - if err = unmarshal(pkData, ret); err != nil { - return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) - } - - if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { - return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) - } - - return privateKey, nil -} - -func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { - bag := new(certBag) - if err := unmarshal(asn1Data, bag); err != nil { - return nil, errors.New("pkcs12: error decoding cert bag: " + 
err.Error()) - } - if !bag.Id.Equal(oidCertTypeX509Certificate) { - return nil, NotImplementedError("only X509 certificates are supported") - } - return bag.Data, nil -} diff --git a/mantle/vendor/golang.org/x/crypto/ssh/agent/client.go b/mantle/vendor/golang.org/x/crypto/ssh/agent/client.go index b909471c..3cfe723d 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -25,7 +25,6 @@ import ( "math/big" "sync" - "crypto" "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh" ) @@ -771,19 +770,26 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, return s.agent.Sign(s.pub, data) } -func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { +func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) { + if algorithm == "" || algorithm == s.pub.Type() { + return s.Sign(rand, data) + } + var flags SignatureFlags - if opts != nil { - switch opts.HashFunc() { - case crypto.SHA256: - flags = SignatureFlagRsaSha256 - case crypto.SHA512: - flags = SignatureFlagRsaSha512 - } + switch algorithm { + case ssh.KeyAlgoRSASHA256: + flags = SignatureFlagRsaSha256 + case ssh.KeyAlgoRSASHA512: + flags = SignatureFlagRsaSha512 + default: + return nil, fmt.Errorf("agent: unsupported algorithm %q", algorithm) } + return s.agent.SignWithFlags(s.pub, data, flags) } +var _ ssh.AlgorithmSigner = &agentKeyringSigner{} + // Calls an extension method. It is up to the agent implementation as to whether or not // any particular extension is supported and may always return an error. 
Because the // type of the response is up to the implementation, this returns the bytes of the diff --git a/mantle/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/mantle/vendor/golang.org/x/crypto/ssh/agent/keyring.go index c9d97943..21bfa870 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -113,7 +113,7 @@ func (r *keyring) Unlock(passphrase []byte) error { // expireKeysLocked removes expired keys from the keyring. If a key was added // with a lifetimesecs contraint and seconds >= lifetimesecs seconds have -// ellapsed, it is removed. The caller *must* be holding the keyring mutex. +// elapsed, it is removed. The caller *must* be holding the keyring mutex. func (r *keyring) expireKeysLocked() { for _, k := range r.keys { if k.expire != nil && time.Now().After(*k.expire) { @@ -205,9 +205,9 @@ func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureF var algorithm string switch flags { case SignatureFlagRsaSha256: - algorithm = ssh.SigAlgoRSASHA2256 + algorithm = ssh.KeyAlgoRSASHA256 case SignatureFlagRsaSha512: - algorithm = ssh.SigAlgoRSASHA2512 + algorithm = ssh.KeyAlgoRSASHA512 default: return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags) } diff --git a/mantle/vendor/golang.org/x/crypto/ssh/certs.go b/mantle/vendor/golang.org/x/crypto/ssh/certs.go index 916c840b..a69e2249 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/certs.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/certs.go @@ -14,8 +14,10 @@ import ( "time" ) -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. +// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear +// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. +// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't +// appear in the Signature.Format field. 
const ( CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" @@ -25,6 +27,21 @@ const ( CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" + + // CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a + // Certificate.Type (or PublicKey.Type), but only in + // ClientConfig.HostKeyAlgorithms. + CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com" + CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com" +) + +const ( + // Deprecated: use CertAlgoRSAv01. + CertSigAlgoRSAv01 = CertAlgoRSAv01 + // Deprecated: use CertAlgoRSASHA256v01. + CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01 + // Deprecated: use CertAlgoRSASHA512v01. + CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01 ) // Certificate types distinguish between host and user @@ -423,6 +440,16 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } c.SignatureKey = authority.PublicKey() + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. 
+ if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) + if err != nil { + return err + } + c.Signature = sig + return nil + } + sig, err := authority.Sign(rand, c.bytesForSigning()) if err != nil { return err @@ -431,26 +458,40 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { return nil } -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, - KeyAlgoED25519: CertAlgoED25519v01, - KeyAlgoSKED25519: CertAlgoSKED25519v01, +// certKeyAlgoNames is a mapping from known certificate algorithm names to the +// corresponding public key signature algorithm. +var certKeyAlgoNames = map[string]string{ + CertAlgoRSAv01: KeyAlgoRSA, + CertAlgoRSASHA256v01: KeyAlgoRSASHA256, + CertAlgoRSASHA512v01: KeyAlgoRSASHA512, + CertAlgoDSAv01: KeyAlgoDSA, + CertAlgoECDSA256v01: KeyAlgoECDSA256, + CertAlgoECDSA384v01: KeyAlgoECDSA384, + CertAlgoECDSA521v01: KeyAlgoECDSA521, + CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256, + CertAlgoED25519v01: KeyAlgoED25519, + CertAlgoSKED25519v01: KeyAlgoSKED25519, +} + +// underlyingAlgo returns the signature algorithm associated with algo (which is +// an advertised or negotiated public key or host key algorithm). These are +// usually the same, except for certificate algorithms. +func underlyingAlgo(algo string) string { + if a, ok := certKeyAlgoNames[algo]; ok { + return a + } + return algo } -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. 
-func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo +// certificateAlgo returns the certificate algorithms that uses the provided +// underlying signature algorithm. +func certificateAlgo(algo string) (certAlgo string, ok bool) { + for certName, algoName := range certKeyAlgoNames { + if algoName == algo { + return certName, true } } - panic("unknown cert algorithm") + return "", false } func (cert *Certificate) bytesForSigning() []byte { @@ -494,13 +535,13 @@ func (c *Certificate) Marshal() []byte { return result } -// Type returns the key name. It is part of the PublicKey interface. +// Type returns the certificate algorithm name. It is part of the PublicKey interface. func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] + certName, ok := certificateAlgo(c.Key.Type()) if !ok { - panic("unknown cert key type " + c.Key.Type()) + panic("unknown certificate type for key type " + c.Key.Type()) } - return algo + return certName } // Verify verifies a signature against the certificate's public diff --git a/mantle/vendor/golang.org/x/crypto/ssh/cipher.go b/mantle/vendor/golang.org/x/crypto/ssh/cipher.go index bddbde5d..f8bdf498 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/cipher.go @@ -394,6 +394,10 @@ func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) } c.incIV() + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + padding := plain[0] if padding < 4 { // padding is a byte, so it automatically satisfies @@ -710,6 +714,10 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([ plain := c.buf[4:contentEnd] s.XORKeyStream(plain, plain) + if len(plain) == 0 { + return nil, errors.New("ssh: empty packet") + } + padding := plain[0] if padding < 4 { // padding is a byte, so it automatically satisfies diff --git 
a/mantle/vendor/golang.org/x/crypto/ssh/client.go b/mantle/vendor/golang.org/x/crypto/ssh/client.go index 99f68bd3..bdc356cb 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/client.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/client.go @@ -113,14 +113,18 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e return c.clientAuthenticate(config) } -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { +// verifyHostKeySignature verifies the host key obtained in the key exchange. +// algo is the negotiated algorithm, and may be a certificate type. +func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error { sig, rest, ok := parseSignatureBody(result.Signature) if len(rest) > 0 || !ok { return errors.New("ssh: signature parse error") } + if a := underlyingAlgo(algo); sig.Format != a { + return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a) + } + return hostKey.Verify(result.H, sig) } @@ -224,11 +228,11 @@ type ClientConfig struct { // be used for the connection. If empty, a reasonable default is used. ClientVersion string - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of + // HostKeyAlgorithms lists the public key algorithms that the client will + // accept from the server for host key authentication, in order of // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + // string returned from a PublicKey.Type method may be used, or + // any of the CertAlgo and KeyAlgo constants. HostKeyAlgorithms []string // Timeout is the maximum amount of time for the TCP connection to establish. 
diff --git a/mantle/vendor/golang.org/x/crypto/ssh/client_auth.go b/mantle/vendor/golang.org/x/crypto/ssh/client_auth.go index c611aeb6..409b5ea1 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "strings" ) type authResult int @@ -29,6 +30,33 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { if err != nil { return err } + // The server may choose to send a SSH_MSG_EXT_INFO at this point (if we + // advertised willingness to receive one, which we always do) or not. See + // RFC 8308, Section 2.4. + extensions := make(map[string][]byte) + if len(packet) > 0 && packet[0] == msgExtInfo { + var extInfo extInfoMsg + if err := Unmarshal(packet, &extInfo); err != nil { + return err + } + payload := extInfo.Payload + for i := uint32(0); i < extInfo.NumExtensions; i++ { + name, rest, ok := parseString(payload) + if !ok { + return parseError(msgExtInfo) + } + value, rest, ok := parseString(rest) + if !ok { + return parseError(msgExtInfo) + } + extensions[string(name)] = value + payload = rest + } + packet, err = c.transport.readPacket() + if err != nil { + return err + } + } var serviceAccept serviceAcceptMsg if err := Unmarshal(packet, &serviceAccept); err != nil { return err @@ -41,7 +69,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { sessionID := c.transport.getSessionID() for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { return err } @@ -93,7 +121,7 @@ type AuthMethod interface { // If authentication is not successful, a []string of alternative // method names is returned. If the slice is nil, it will be ignored // and the previous set of possible methods will be reused. 
- auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) + auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) // method returns the RFC 4252 method name. method() string @@ -102,7 +130,7 @@ type AuthMethod interface { // "none" authentication, RFC 4252 section 5.2. type noneAuth int -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { if err := c.writePacket(Marshal(&userAuthRequestMsg{ User: user, Service: serviceSSH, @@ -122,7 +150,7 @@ func (n *noneAuth) method() string { // a function call, e.g. by prompting the user. type passwordCallback func() (password string, err error) -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type passwordAuthMsg struct { User string `sshtype:"50"` Service string @@ -189,7 +217,46 @@ func (cb publicKeyCallback) method() string { return "publickey" } -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { + keyFormat := signer.PublicKey().Type() + + // Like in sendKexInit, if the public key implements AlgorithmSigner we + // assume it supports all algorithms, otherwise only the key format one. 
+ as, ok := signer.(AlgorithmSigner) + if !ok { + return algorithmSignerWrapper{signer}, keyFormat + } + + extPayload, ok := extensions["server-sig-algs"] + if !ok { + // If there is no "server-sig-algs" extension, fall back to the key + // format algorithm. + return as, keyFormat + } + + // The server-sig-algs extension only carries underlying signature + // algorithm, but we are trying to select a protocol-level public key + // algorithm, which might be a certificate type. Extend the list of server + // supported algorithms to include the corresponding certificate algorithms. + serverAlgos := strings.Split(string(extPayload), ",") + for _, algo := range serverAlgos { + if certAlgo, ok := certificateAlgo(algo); ok { + serverAlgos = append(serverAlgos, certAlgo) + } + } + + keyAlgos := algorithmsForKeyFormat(keyFormat) + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) + if err != nil { + // If there is no overlap, try the key anyway with the key format + // algorithm, to support servers that fail to list all supported + // algorithms. + return as, keyFormat + } + return as, algo +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { // Authentication is performed by sending an enquiry to test if a key is // acceptable to the remote. If the key is acceptable, the client will // attempt to authenticate with the valid key. 
If not the client will repeat @@ -201,7 +268,10 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand } var methods []string for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) + pub := signer.PublicKey() + as, algo := pickSignatureAlgorithm(signer, extensions) + + ok, err := validateKey(pub, algo, user, c) if err != nil { return authFailure, nil, err } @@ -209,13 +279,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand continue } - pub := signer.PublicKey() pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + data := buildDataSignedForAuth(session, userAuthRequestMsg{ User: user, Service: serviceSSH, Method: cb.method(), - }, []byte(pub.Type()), pubKey)) + }, algo, pubKey) + sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return authFailure, nil, err } @@ -229,7 +299,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand Service: serviceSSH, Method: cb.method(), HasSig: true, - Algoname: pub.Type(), + Algoname: algo, PubKey: pubKey, Sig: sig, } @@ -266,26 +336,25 @@ func containsMethod(methods []string, method string) bool { } // validateKey validates the key provided is acceptable to the server. 
-func validateKey(key PublicKey, user string, c packetConn) (bool, error) { +func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) { pubKey := key.Marshal() msg := publickeyAuthMsg{ User: user, Service: serviceSSH, Method: "publickey", HasSig: false, - Algoname: key.Type(), + Algoname: algo, PubKey: pubKey, } if err := c.writePacket(Marshal(&msg)); err != nil { return false, err } - return confirmKeyAck(key, c) + return confirmKeyAck(key, algo, c) } -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { +func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) { pubKey := key.Marshal() - algoname := key.Type() for { packet, err := c.readPacket() @@ -302,14 +371,14 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { if err := Unmarshal(packet, &msg); err != nil { return false, err } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) { return false, nil } return true, nil case msgUserAuthFailure: return false, nil default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0]) } } } @@ -330,6 +399,7 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet // along with a list of remaining authentication methods to try next and // an error if an unexpected response was received. func handleAuthResponse(c packetConn) (authResult, []string, error) { + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -341,6 +411,12 @@ func handleAuthResponse(c packetConn) (authResult, []string, error) { if err := handleBannerResponse(c, packet); err != nil { return authFailure, nil, err } + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. 
+ if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + gotMsgExtInfo = true case msgUserAuthFailure: var msg userAuthFailureMsg if err := Unmarshal(packet, &msg); err != nil { @@ -380,10 +456,10 @@ func handleBannerResponse(c packetConn, packet []byte) error { // disabling echoing (e.g. for passwords), and return all the answers. // Challenge may be called multiple times in a single session. After // successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be +// questions, for which the name and instruction messages should be // printed. RFC 4256 section 3.3 details how the UI should behave for // both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) +type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error) // KeyboardInteractive returns an AuthMethod using a prompt/response // sequence controlled by the server. 
@@ -395,7 +471,7 @@ func (cb KeyboardInteractiveChallenge) method() string { return "keyboard-interactive" } -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { type initiateMsg struct { User string `sshtype:"50"` Service string @@ -412,6 +488,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } + gotMsgExtInfo := false for { packet, err := c.readPacket() if err != nil { @@ -425,6 +502,13 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, err } continue + case msgExtInfo: + // Ignore post-authentication RFC 8308 extensions, once. + if gotMsgExtInfo { + return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + gotMsgExtInfo = true + continue case msgUserAuthInfoRequest: // OK case msgUserAuthFailure: @@ -465,7 +549,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") } - answers, err := cb(msg.User, msg.Instruction, prompts, echos) + answers, err := cb(msg.Name, msg.Instruction, prompts, echos) if err != nil { return authFailure, nil, err } @@ -497,9 +581,9 @@ type retryableAuthMethod struct { maxTries int } -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) { for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) + ok, methods, err = 
r.authMethod.auth(session, user, c, rand, extensions) if ok != authFailure || err != nil { // either success, partial success or error terminate return ok, methods, err } @@ -542,7 +626,7 @@ type gssAPIWithMICCallback struct { target string } -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { +func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) { m := &userAuthRequestMsg{ User: user, Service: serviceSSH, diff --git a/mantle/vendor/golang.org/x/crypto/ssh/common.go b/mantle/vendor/golang.org/x/crypto/ssh/common.go index 290382d0..2a47a61d 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/common.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/common.go @@ -44,11 +44,11 @@ var preferredCiphers = []string{ // supportedKexAlgos specifies the supported key-exchange algorithms in // preference order. var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1, } // serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden @@ -61,18 +61,20 @@ var serverForbiddenKexAlgos = map[string]struct{}{ // preferredKexAlgos specifies the default preference for key-exchange algorithms // in preference order. var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, } // supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods // of authenticating servers) in preference order. 
var supportedHostKeyAlgos = []string{ + CertAlgoRSASHA512v01, CertAlgoRSASHA256v01, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSASHA512, KeyAlgoRSASHA256, KeyAlgoRSA, KeyAlgoDSA, KeyAlgoED25519, @@ -87,19 +89,33 @@ var supportedMACs = []string{ var supportedCompressions = []string{compressionNone} -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. +// hashFuncs keeps the mapping of supported signature algorithms to their +// respective hashes needed for signing and verification. var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, + KeyAlgoRSA: crypto.SHA1, + KeyAlgoRSASHA256: crypto.SHA256, + KeyAlgoRSASHA512: crypto.SHA512, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + // KeyAlgoED25519 doesn't pre-hash. + KeyAlgoSKECDSA256: crypto.SHA256, + KeyAlgoSKED25519: crypto.SHA256, +} + +// algorithmsForKeyFormat returns the supported signature algorithms for a given +// public key format (PublicKey.Type), in order of preference. See RFC 8332, +// Section 2. See also the note in sendKexInit on backwards compatibility. 
+func algorithmsForKeyFormat(keyFormat string) []string { + switch keyFormat { + case KeyAlgoRSA: + return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA} + case CertAlgoRSAv01: + return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01} + default: + return []string{keyFormat} + } } // unexpectedMessageError results when the SSH message that we received didn't @@ -146,6 +162,11 @@ func (a *directionAlgorithms) rekeyBytes() int64 { return 1 << 30 } +var aeadCiphers = map[string]bool{ + gcmCipherID: true, + chacha20Poly1305ID: true, +} + type algorithms struct { kex string hostKey string @@ -181,14 +202,18 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs return } - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return + if !aeadCiphers[ctos.Cipher] { + ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } } - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return + if !aeadCiphers[stoc.Cipher] { + stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } } ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) @@ -272,8 +297,9 @@ func (c *Config) SetDefaults() { } // buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { +// possession of a private key. See RFC 4252, section 7. algo is the advertised +// algorithm, and may be a certificate type. 
+func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte { data := struct { Session []byte Type byte @@ -281,7 +307,7 @@ func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubK Service string Method string Sign bool - Algo []byte + Algo string PubKey []byte }{ sessionID, diff --git a/mantle/vendor/golang.org/x/crypto/ssh/handshake.go b/mantle/vendor/golang.org/x/crypto/ssh/handshake.go index 2b10b05a..f815cdb4 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/handshake.go @@ -455,14 +455,36 @@ func (t *handshakeTransport) sendKexInit() error { } io.ReadFull(rand.Reader, msg.Cookie[:]) - if len(t.hostKeys) > 0 { + isServer := len(t.hostKeys) > 0 + if isServer { for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) + // If k is an AlgorithmSigner, presume it supports all signature algorithms + // associated with the key format. (Ideally AlgorithmSigner would have a + // method to advertise supported algorithms, but it doesn't. This means that + // adding support for a new algorithm is a breaking change, as we will + // immediately negotiate it even if existing implementations don't support + // it. If that ever happens, we'll have to figure something out.) + // If k is not an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign can't pick + // a different default.) + keyFormat := k.PublicKey().Type() + if _, ok := k.(AlgorithmSigner); ok { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) 
+ } else { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) + } } } else { msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + + // As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what + // algorithms the server supports for public key authentication. See RFC + // 8303, Section 2.1. + msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1) + msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...) + msg.KexAlgos = append(msg.KexAlgos, "ext-info-c") } + packet := Marshal(msg) // writePacket destroys the contents, so save a copy. @@ -582,9 +604,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { var result *kexResult if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) + result, err = t.server(kex, &magics) } else { - result, err = t.client(kex, t.algorithms, &magics) + result, err = t.client(kex, &magics) } if err != nil { @@ -611,19 +633,52 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { return nil } -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k +// algorithmSignerWrapper is an AlgorithmSigner that only supports the default +// key format algorithm. +// +// This is technically a violation of the AlgorithmSigner interface, but it +// should be unreachable given where we use this. Anyway, at least it returns an +// error instead of panicing or producing an incorrect signature. 
+type algorithmSignerWrapper struct { + Signer +} + +func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if algorithm != underlyingAlgo(a.PublicKey().Type()) { + return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm") + } + return a.Sign(rand, data) +} + +func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { + for _, k := range hostKeys { + if algo == k.PublicKey().Type() { + return algorithmSignerWrapper{k} } + k, ok := k.(AlgorithmSigner) + if !ok { + continue + } + for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) { + if algo == a { + return k + } + } + } + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { + hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey) + if hostKey == nil { + return nil, errors.New("ssh: internal error: negotiated unsupported signature type") } - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey) return r, err } -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { +func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) { result, err := kex.Client(t.conn, t.config.Rand, magics) if err != nil { return nil, err @@ -634,7 +689,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics * return nil, err } - if err := verifyHostKeySignature(hostKey, result); err != nil { + if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil { return nil, err } diff --git a/mantle/vendor/golang.org/x/crypto/ssh/kex.go b/mantle/vendor/golang.org/x/crypto/ssh/kex.go index 766e9293..927a90cd 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/kex.go +++ 
b/mantle/vendor/golang.org/x/crypto/ssh/kex.go @@ -20,12 +20,14 @@ import ( ) const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" + kexAlgoCurve25519SHA256 = "curve25519-sha256" // For the following kex only the client half contains a production // ready implementation. The server half only consists of a minimal @@ -75,8 +77,9 @@ func (m *handshakeMagics) write(w io.Writer) { // kexAlgorithm abstracts different key exchange algorithms. type kexAlgorithm interface { // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + // with a hostkey. algo is the negotiated algorithm, and may + // be a certificate type. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error) // Client runs the client-side key agreement. Caller is // responsible for verifying the host key signature. @@ -86,6 +89,7 @@ type kexAlgorithm interface { // dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. 
type dhGroup struct { g, p, pMinus1 *big.Int + hashFunc crypto.Hash } func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { @@ -96,8 +100,6 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, } func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - var x *big.Int for { var err error @@ -132,7 +134,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha return nil, err } - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, kexDHReply.HostKey) writeInt(h, X) @@ -146,12 +148,11 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: kexDHReply.HostKey, Signature: kexDHReply.Signature, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, nil } -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -179,7 +180,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha hostKeyBytes := priv.PublicKey().Marshal() - h := hashFunc.New() + h := group.hashFunc.New() magics.write(h) writeString(h, hostKeyBytes) writeInt(h, kexDHInit.X) @@ -193,7 +194,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } @@ -211,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha K: K, HostKey: hostKeyBytes, Signature: sig, - Hash: crypto.SHA1, + Hash: group.hashFunc, }, err } @@ -314,7 +315,7 @@ func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { return true } -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return nil, err @@ -359,7 +360,7 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -384,39 +385,62 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p }, nil } +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + var kexAlgoMap = map[string]kexAlgorithm{} func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. + // This is the group called diffie-hellman-group1-sha1 in + // RFC 4253 and Oakley Group 2 in RFC 2409. 
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + hashFunc: crypto.SHA1, } - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. + // This are the groups called diffie-hellman-group14-sha1 and + // diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268, + // and Oakley Group 14 in RFC 3526. p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + group14 := &dhGroup{ g: new(big.Int).SetInt64(2), p: p, pMinus1: new(big.Int).Sub(p, bigOne), } + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA1, + } + kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{ + g: group14.g, p: group14.p, pMinus1: group14.pMinus1, + hashFunc: crypto.SHA256, + } + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} + kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] 
= &curve25519sha256{} kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} } -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +// curve25519sha256 implements the curve25519-sha256 (formerly known as +// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731. type curve25519sha256 struct{} type curve25519KeyPair struct { @@ -486,7 +510,7 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh }, nil } -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { packet, err := c.readPacket() if err != nil { return @@ -527,7 +551,7 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh H := h.Sum(nil) - sig, err := signAndMarshal(priv, rand, H) + sig, err := signAndMarshal(priv, rand, H, algo) if err != nil { return nil, err } @@ -553,7 +577,6 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh // diffie-hellman-group-exchange-sha256 key agreement protocols, // as described in RFC 4419 type dhGEXSHA struct { - g, p *big.Int hashFunc crypto.Hash } @@ -563,14 +586,7 @@ const ( dhGroupExchangeMaximumBits = 8192 ) -func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { - return nil, fmt.Errorf("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil -} - -func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { +func 
(gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ MinBits: dhGroupExchangeMinimumBits, @@ -587,35 +603,29 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - var kexDHGexGroup kexDHGexGroupMsg - if err = Unmarshal(packet, &kexDHGexGroup); err != nil { + var msg kexDHGexGroupMsg + if err = Unmarshal(packet, &msg); err != nil { return nil, err } // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) + if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits { + return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen()) } - gex.p = kexDHGexGroup.P - gex.g = kexDHGexGroup.G - - // Check if g is safe by verifing that g > 1 and g < p - 1 - one := big.NewInt(1) - var pMinusOne = &big.Int{} - pMinusOne.Sub(gex.p, one) - if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { + // Check if g is safe by verifying that 1 < g < p-1 + pMinusOne := new(big.Int).Sub(msg.P, bigOne) + if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: server provided gex g is not safe") } // Send GexInit - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(msg.P, 1) x, err := rand.Int(randSource, pHalf) if err != nil { return nil, err } - X := new(big.Int).Exp(gex.g, x, gex.p) + X := new(big.Int).Exp(msg.G, x, msg.P) kexDHGexInit := kexDHGexInitMsg{ X: X, } @@ -634,13 +644,13 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake return nil, err } - kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) - if err != nil { - 
return nil, err + if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P) - // Check if k is safe by verifing that k > 1 and k < p - 1 - if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { + // Check if k is safe by verifying that k > 1 and k < p - 1 + if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 { return nil, fmt.Errorf("ssh: derived k is not safe") } @@ -650,8 +660,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, msg.P) + writeInt(h, msg.G) writeInt(h, X) writeInt(h, kexDHGexReply.Y) K := make([]byte, intLength(kInt)) @@ -670,7 +680,7 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. // // This is a minimal implementation to satisfy the automated tests. 
-func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { @@ -681,35 +691,17 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - // smoosh the user's preferred size into our own limits - if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits - } - if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits - } - // fix min/max if they're inconsistent. technically, we could just pout - // and hang up, but there's no harm in giving them the benefit of the - // doubt and just picking a bitsize for them. - if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { - kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits - } - if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { - kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits - } - // Send GexGroup // This is the group called diffie-hellman-group14-sha1 in RFC // 4253 and Oakley Group 14 in RFC 3526. 
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - gex.p = p - gex.g = big.NewInt(2) + g := big.NewInt(2) - kexDHGexGroup := kexDHGexGroupMsg{ - P: gex.p, - G: gex.g, + msg := &kexDHGexGroupMsg{ + P: p, + G: g, } - if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { + if err := c.writePacket(Marshal(msg)); err != nil { return nil, err } @@ -723,19 +715,19 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake return } - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) + pHalf := new(big.Int).Rsh(p, 1) y, err := rand.Int(randSource, pHalf) if err != nil { return } + Y := new(big.Int).Exp(g, y, p) - Y := new(big.Int).Exp(gex.g, y, gex.p) - kInt, err := gex.diffieHellman(kexDHGexInit.X, y) - if err != nil { - return nil, err + pMinusOne := new(big.Int).Sub(p, bigOne) + if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") } + kInt := new(big.Int).Exp(kexDHGexInit.X, y, p) hostKeyBytes := priv.PublicKey().Marshal() @@ -745,8 +737,8 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) + writeInt(h, p) + writeInt(h, g) writeInt(h, kexDHGexInit.X) writeInt(h, Y) @@ -758,7 +750,7 @@ 
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake // H is already a hash, but the hostkey signing will apply its // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) + sig, err := signAndMarshal(priv, randSource, H, algo) if err != nil { return nil, err } diff --git a/mantle/vendor/golang.org/x/crypto/ssh/keys.go b/mantle/vendor/golang.org/x/crypto/ssh/keys.go index 31f26349..1c7de1a6 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/keys.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/keys.go @@ -30,8 +30,9 @@ import ( "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) -// These constants represent the algorithm names for key types supported by this -// package. +// Public key algorithms names. These values can appear in PublicKey.Type, +// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner +// arguments. const ( KeyAlgoRSA = "ssh-rsa" KeyAlgoDSA = "ssh-dss" @@ -41,16 +42,21 @@ const ( KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" KeyAlgoED25519 = "ssh-ed25519" KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" + + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not + // public key formats, so they can't appear as a PublicKey.Type. The + // corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2. + KeyAlgoRSASHA256 = "rsa-sha2-256" + KeyAlgoRSASHA512 = "rsa-sha2-512" ) -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" + // Deprecated: use KeyAlgoRSA. + SigAlgoRSA = KeyAlgoRSA + // Deprecated: use KeyAlgoRSASHA256. + SigAlgoRSASHA2256 = KeyAlgoRSASHA256 + // Deprecated: use KeyAlgoRSASHA512. 
+ SigAlgoRSASHA2512 = KeyAlgoRSASHA512 ) // parsePubKey parses a public key of the given algorithm. @@ -70,7 +76,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err case KeyAlgoSKED25519: return parseSKEd25519(in) case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) + cert, err := parseCert(in, certKeyAlgoNames[algo]) if err != nil { return nil, nil, err } @@ -289,18 +295,21 @@ func MarshalAuthorizedKey(key PublicKey) []byte { return b.Bytes() } -// PublicKey is an abstraction of different types of public keys. +// PublicKey represents a public key using an unspecified algorithm. +// +// Some PublicKeys provided by this package also implement CryptoPublicKey. type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". + // Type returns the key format name, e.g. "ssh-rsa". Type() string - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. + // Marshal returns the serialized key data in SSH wire format, with the name + // prefix. To unmarshal the returned data, use the ParsePublicKey function. Marshal() []byte - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. + // Verify that sig is a signature on the given data using this key. This + // method will hash the data appropriately first. sig.Format is allowed to + // be any signature algorithm compatible with the key type, the caller + // should check if it has more stringent requirements. Verify(data []byte, sig *Signature) error } @@ -311,25 +320,32 @@ type CryptoPublicKey interface { } // A Signer can create signatures that verify against a public key. 
+// +// Some Signers provided by this package also implement AlgorithmSigner. type Signer interface { - // PublicKey returns an associated PublicKey instance. + // PublicKey returns the associated PublicKey. PublicKey() PublicKey - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. + // Sign returns a signature for the given data. This method will hash the + // data appropriately first. The signature algorithm is expected to match + // the key format returned by the PublicKey.Type method (and not to be any + // alternative algorithm supported by the key format). Sign(rand io.Reader, data []byte) (*Signature, error) } -// A AlgorithmSigner is a Signer that also supports specifying a specific -// algorithm to use for signing. +// An AlgorithmSigner is a Signer that also supports specifying an algorithm to +// use for signing. +// +// An AlgorithmSigner can't advertise the algorithms it supports, so it should +// be prepared to be invoked with every algorithm supported by the public key +// format. type AlgorithmSigner interface { Signer - // SignWithAlgorithm is like Signer.Sign, but allows specification of a - // non-default signing algorithm. See the SigAlgo* constants in this - // package for signature algorithms supported by this package. Callers may - // pass an empty string for the algorithm in which case the AlgorithmSigner - // will use its default algorithm. + // SignWithAlgorithm is like Signer.Sign, but allows specifying a desired + // signing algorithm. Callers may pass an empty string for the algorithm in + // which case the AlgorithmSigner will use a default algorithm. This default + // doesn't currently control any behavior in this package. 
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) } @@ -381,17 +397,11 @@ func (r *rsaPublicKey) Marshal() []byte { } func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - var hash crypto.Hash - switch sig.Format { - case SigAlgoRSA: - hash = crypto.SHA1 - case SigAlgoRSASHA2256: - hash = crypto.SHA256 - case SigAlgoRSASHA2512: - hash = crypto.SHA512 - default: + supportedAlgos := algorithmsForKeyFormat(r.Type()) + if !contains(supportedAlgos, sig.Format) { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) } + hash := hashFuncs[sig.Format] h := hash.New() h.Write(data) digest := h.Sum(nil) @@ -466,7 +476,7 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := crypto.SHA1.New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -499,7 +509,7 @@ func (k *dsaPrivateKey) PublicKey() PublicKey { } func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") + return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) } func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { @@ -507,7 +517,7 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) } - h := crypto.SHA1.New() + h := hashFuncs[k.PublicKey().Type()].New() h.Write(data) digest := h.Sum(nil) r, s, err := dsa.Sign(rand, k.PrivateKey, digest) @@ -603,19 +613,6 @@ func supportedEllipticCurve(curve elliptic.Curve) bool { return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() } -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - 
bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - // parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { var w struct { @@ -671,7 +668,7 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write(data) digest := h.Sum(nil) @@ -775,7 +772,7 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } - h := ecHash(k.Curve).New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -874,7 +871,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { return fmt.Errorf("invalid size %d for Ed25519 public key", l) } - h := sha256.New() + h := hashFuncs[sig.Format].New() h.Write([]byte(k.application)) appDigest := h.Sum(nil) @@ -961,44 +958,20 @@ func (s *wrappedSigner) PublicKey() PublicKey { } func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") + return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) } func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // 
The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } + if algorithm == "" { + algorithm = s.pubKey.Type() + } - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } + supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) + if !contains(supportedAlgos, algorithm) { + return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } + hashFunc := hashFuncs[algorithm] var digest []byte if hashFunc != 0 { h := hashFunc.New() diff --git a/mantle/vendor/golang.org/x/crypto/ssh/messages.go b/mantle/vendor/golang.org/x/crypto/ssh/messages.go index ac41a416..19bc67c4 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/messages.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/messages.go @@ -141,6 +141,14 @@ type serviceAcceptMsg struct { Service string `sshtype:"6"` } +// See RFC 8308, section 2.3 +const msgExtInfo = 7 + +type extInfoMsg struct { + NumExtensions uint32 `sshtype:"7"` + Payload []byte `ssh:"rest"` +} + // See RFC 4252, section 5. const msgUserAuthRequest = 50 @@ -180,11 +188,11 @@ const msgUserAuthInfoRequest = 60 const msgUserAuthInfoResponse = 61 type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` + Name string `sshtype:"60"` + Instruction string + Language string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` } // See RFC 4254, section 5.1. 
@@ -782,6 +790,8 @@ func decode(packet []byte) (interface{}, error) { msg = new(serviceRequestMsg) case msgServiceAccept: msg = new(serviceAcceptMsg) + case msgExtInfo: + msg = new(extInfoMsg) case msgKexInit: msg = new(kexInitMsg) case msgKexDHInit: @@ -843,6 +853,7 @@ var packetTypeNames = map[byte]string{ msgDisconnect: "disconnectMsg", msgServiceRequest: "serviceRequestMsg", msgServiceAccept: "serviceAcceptMsg", + msgExtInfo: "extInfoMsg", msgKexInit: "kexInitMsg", msgKexDHInit: "kexDHInitMsg", msgKexDHReply: "kexDHReplyMsg", diff --git a/mantle/vendor/golang.org/x/crypto/ssh/server.go b/mantle/vendor/golang.org/x/crypto/ssh/server.go index b6911e83..70045bdf 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/server.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/server.go @@ -120,7 +120,7 @@ type ServerConfig struct { } // AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server +// key exists with the same public key format, it is replaced. Each server // config must have at least one host key. func (s *ServerConfig) AddHostKey(key Signer) { for i, k := range s.hostKeys { @@ -212,9 +212,10 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha } // signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) +// and serializes the result in SSH wire format. algo is the negotiate +// algorithm and may be a certificate type. 
+func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) { + sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo)) if err != nil { return nil, err } @@ -284,7 +285,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) func isAcceptableAlgo(algo string) bool { switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, + case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: return true } @@ -553,6 +554,7 @@ userAuthLoop: if !ok || len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } + // Ensure the public key algo and signature algo // are supported. 
Compare the private key // algorithm name that corresponds to algo with @@ -562,7 +564,12 @@ userAuthLoop: authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) break } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + if underlyingAlgo(algo) != sig.Format { + authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo) + break + } + + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData) if err := pubKey.Verify(signedData, sig); err != nil { return nil, err @@ -633,6 +640,30 @@ userAuthLoop: } authFailures++ + if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries { + // If we have hit the max attempts, don't bother sending the + // final SSH_MSG_USERAUTH_FAILURE message, since there are + // no more authentication methods which can be attempted, + // and this message may cause the client to re-attempt + // authentication while we send the disconnect message. + // Continue, and trigger the disconnect at the start of + // the loop. + // + // The SSH specification is somewhat confusing about this, + // RFC 4252 Section 5.1 requires each authentication failure + // be responded to with a respective SSH_MSG_USERAUTH_FAILURE + // message, but Section 4 says the server should disconnect + // after some number of attempts, but it isn't explicit which + // message should take precedence (i.e. should there be a failure + // message than a disconnect message, or if we are going to + // disconnect, should we only send that message.) + // + // Either way, OpenSSH disconnects immediately after the last + // failed authentication attempt, and given they are typically + // considered the golden implementation it seems reasonable + // to match that behavior.
+ continue + } var failureMsg userAuthFailureMsg if config.PasswordCallback != nil { @@ -670,7 +701,7 @@ type sshClientKeyboardInteractive struct { *connection } -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { +func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) { if len(questions) != len(echos) { return nil, errors.New("ssh: echos and questions must have equal length") } @@ -682,6 +713,7 @@ func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, quest } if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Name: name, Instruction: instruction, NumPrompts: uint32(len(questions)), Prompts: prompts, diff --git a/mantle/vendor/golang.org/x/crypto/ssh/session.go b/mantle/vendor/golang.org/x/crypto/ssh/session.go index d3321f6b..eca31a22 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/session.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/session.go @@ -85,6 +85,7 @@ const ( IXANY = 39 IXOFF = 40 IMAXBEL = 41 + IUTF8 = 42 // RFC 8160 ISIG = 50 ICANON = 51 XCASE = 52 diff --git a/mantle/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/mantle/vendor/golang.org/x/crypto/ssh/terminal/terminal.go deleted file mode 100644 index a4d1919a..00000000 --- a/mantle/vendor/golang.org/x/crypto/ssh/terminal/terminal.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package terminal provides support functions for dealing with terminals, as -// commonly found on UNIX systems. -// -// Deprecated: this package moved to golang.org/x/term. 
-package terminal - -import ( - "io" - - "golang.org/x/term" -) - -// EscapeCodes contains escape sequences that can be written to the terminal in -// order to achieve different styles of text. -type EscapeCodes = term.EscapeCodes - -// Terminal contains the state for running a VT100 terminal that is capable of -// reading lines of input. -type Terminal = term.Terminal - -// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is -// a local terminal, that terminal must first have been put into raw mode. -// prompt is a string that is written at the start of each input line (i.e. -// "> "). -func NewTerminal(c io.ReadWriter, prompt string) *Terminal { - return term.NewTerminal(c, prompt) -} - -// ErrPasteIndicator may be returned from ReadLine as the error, in addition -// to valid line data. It indicates that bracketed paste mode is enabled and -// that the returned line consists only of pasted data. Programs may wish to -// interpret pasted data more literally than typed data. -var ErrPasteIndicator = term.ErrPasteIndicator - -// State contains the state of a terminal. -type State = term.State - -// IsTerminal returns whether the given file descriptor is a terminal. -func IsTerminal(fd int) bool { - return term.IsTerminal(fd) -} - -// ReadPassword reads a line of input from a terminal without local echo. This -// is commonly used for inputting passwords and other sensitive data. The slice -// returned does not include the \n. -func ReadPassword(fd int) ([]byte, error) { - return term.ReadPassword(fd) -} - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd int) (*State, error) { - return term.MakeRaw(fd) -} - -// Restore restores the terminal connected to the given file descriptor to a -// previous state. 
-func Restore(fd int, oldState *State) error { - return term.Restore(fd, oldState) -} - -// GetState returns the current state of a terminal which may be useful to -// restore the terminal after a signal. -func GetState(fd int) (*State, error) { - return term.GetState(fd) -} - -// GetSize returns the dimensions of the given terminal. -func GetSize(fd int) (width, height int, err error) { - return term.GetSize(fd) -} diff --git a/mantle/vendor/golang.org/x/crypto/ssh/transport.go b/mantle/vendor/golang.org/x/crypto/ssh/transport.go index 49ddc2e7..acf5a21b 100644 --- a/mantle/vendor/golang.org/x/crypto/ssh/transport.go +++ b/mantle/vendor/golang.org/x/crypto/ssh/transport.go @@ -238,15 +238,19 @@ var ( // (to setup server->client keys) or clientKeys (for client->server keys). func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] iv := make([]byte, cipherMode.ivSize) key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) generateKeyMaterial(iv, d.ivTag, kex) generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) + + var macKey []byte + if !aeadCiphers[algs.Cipher] { + macMode := macModes[algs.MAC] + macKey = make([]byte, macMode.keySize) + generateKeyMaterial(macKey, d.macKeyTag, kex) + } return cipherModes[algs.Cipher].create(key, iv, macKey, algs) } diff --git a/mantle/vendor/golang.org/x/net/AUTHORS b/mantle/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/mantle/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. 
diff --git a/mantle/vendor/golang.org/x/net/CONTRIBUTORS b/mantle/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/mantle/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/mantle/vendor/golang.org/x/net/context/context.go b/mantle/vendor/golang.org/x/net/context/context.go index cf66309c..a3c021d3 100644 --- a/mantle/vendor/golang.org/x/net/context/context.go +++ b/mantle/vendor/golang.org/x/net/context/context.go @@ -21,9 +21,9 @@ // explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } // // Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. 
diff --git a/mantle/vendor/golang.org/x/net/context/go17.go b/mantle/vendor/golang.org/x/net/context/go17.go index 0a54bdbc..344bd143 100644 --- a/mantle/vendor/golang.org/x/net/context/go17.go +++ b/mantle/vendor/golang.org/x/net/context/go17.go @@ -54,11 +54,11 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/mantle/vendor/golang.org/x/net/context/pre_go17.go b/mantle/vendor/golang.org/x/net/context/pre_go17.go index 7b6b6851..5270db5d 100644 --- a/mantle/vendor/golang.org/x/net/context/pre_go17.go +++ b/mantle/vendor/golang.org/x/net/context/pre_go17.go @@ -264,11 +264,11 @@ func (c *timerCtx) cancel(removeFromParent bool, err error) { // Canceling this context releases resources associated with it, so code should // call cancel as soon as the operations running in this Context complete: // -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } +// func slowOperationWithTimeout(ctx context.Context) 
(Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { return WithDeadline(parent, time.Now().Add(timeout)) } diff --git a/mantle/vendor/golang.org/x/net/http/httpguts/httplex.go b/mantle/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e85..c79aa73f 100644 --- a/mantle/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/mantle/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -173,15 +173,13 @@ func tokenEqual(t1, t2 string) bool { // isLWS reports whether b is linear white space, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// -// LWS = [CRLF] 1*( SP | HT ) +// LWS = [CRLF] 1*( SP | HT ) func isLWS(b byte) bool { return b == ' ' || b == '\t' } // isCTL reports whether b is a control byte, according // to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// -// CTL = +// CTL = func isCTL(b byte) bool { const del = 0x7f // a CTL return b < ' ' || b == del @@ -191,13 +189,12 @@ func isCTL(b byte) bool { // HTTP/2 imposes the additional restriction that uppercase ASCII // letters are not allowed. // -// RFC 7230 says: -// -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -270,28 +267,27 @@ var validHostByte = [256]bool{ // ValidHeaderFieldValue reports whether v is a valid "field-value" according to // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : // -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = // // http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : // -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = +// TEXT = +// LWS = [CRLF] 1*( SP | HT ) +// CTL = // // RFC 7230 says: -// -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" // // http2 further says: "Similarly, HTTP/2 allows header field values // that are not valid. 
While most of the values that can be encoded diff --git a/mantle/vendor/golang.org/x/net/http2/client_conn_pool.go b/mantle/vendor/golang.org/x/net/http2/client_conn_pool.go index 780968d6..c936843e 100644 --- a/mantle/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/mantle/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -139,6 +139,7 @@ func (p *clientConnPool) getStartDialLocked(ctx context.Context, addr string) *d func (c *dialCall) dial(ctx context.Context, addr string) { const singleUse = false // shared conn c.res, c.err = c.p.t.dialClientConn(ctx, addr, singleUse) + close(c.done) c.p.mu.Lock() delete(c.p.dialing, addr) @@ -146,8 +147,6 @@ func (c *dialCall) dial(ctx context.Context, addr string) { c.p.addConnLocked(addr, c.res) } c.p.mu.Unlock() - - close(c.done) } // addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't diff --git a/mantle/vendor/golang.org/x/net/http2/errors.go b/mantle/vendor/golang.org/x/net/http2/errors.go index f2067dab..2663e5d2 100644 --- a/mantle/vendor/golang.org/x/net/http2/errors.go +++ b/mantle/vendor/golang.org/x/net/http2/errors.go @@ -136,7 +136,7 @@ func (e headerFieldNameError) Error() string { type headerFieldValueError string func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value for %q", string(e)) + return fmt.Sprintf("invalid header field value %q", string(e)) } var ( diff --git a/mantle/vendor/golang.org/x/net/http2/frame.go b/mantle/vendor/golang.org/x/net/http2/frame.go index 0178647e..96a74790 100644 --- a/mantle/vendor/golang.org/x/net/http2/frame.go +++ b/mantle/vendor/golang.org/x/net/http2/frame.go @@ -1532,8 +1532,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } if !httpguts.ValidHeaderFieldValue(hf.Value) { - // Don't include the value in the error, because it may be sensitive. 
- invalid = headerFieldValueError(hf.Name) + invalid = headerFieldValueError(hf.Value) } isPseudo := strings.HasPrefix(hf.Name, ":") if isPseudo { diff --git a/mantle/vendor/golang.org/x/net/http2/hpack/huffman.go b/mantle/vendor/golang.org/x/net/http2/hpack/huffman.go index 20d083a7..fe0b84cc 100644 --- a/mantle/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/mantle/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -169,50 +169,25 @@ func buildRootHuffmanNode() { // AppendHuffmanString appends s, as encoded in Huffman codes, to dst // and returns the extended buffer. func AppendHuffmanString(dst []byte, s string) []byte { - // This relies on the maximum huffman code length being 30 (See tables.go huffmanCodeLen array) - // So if a uint64 buffer has less than 32 valid bits can always accommodate another huffmanCode. - var ( - x uint64 // buffer - n uint // number valid of bits present in x - ) + rembits := uint8(8) + for i := 0; i < len(s); i++ { - c := s[i] - n += uint(huffmanCodeLen[c]) - x <<= huffmanCodeLen[c] % 64 - x |= uint64(huffmanCodes[c]) - if n >= 32 { - n %= 32 // Normally would be -= 32 but %= 32 informs compiler 0 <= n <= 31 for upcoming shift - y := uint32(x >> n) // Compiler doesn't combine memory writes if y isn't uint32 - dst = append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) + if rembits == 8 { + dst = append(dst, 0) } + dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) } - // Add padding bits if necessary - if over := n % 8; over > 0 { - const ( - eosCode = 0x3fffffff - eosNBits = 30 - eosPadByte = eosCode >> (eosNBits - 8) - ) - pad := 8 - over - x = (x << pad) | (eosPadByte >> over) - n += pad // 8 now divides into n exactly - } - // n in (0, 8, 16, 24, 32) - switch n / 8 { - case 0: - return dst - case 1: - return append(dst, byte(x)) - case 2: - y := uint16(x) - return append(dst, byte(y>>8), byte(y)) - case 3: - y := uint16(x >> 8) - return append(dst, byte(y>>8), byte(y), byte(x)) + + if rembits < 8 { + // special 
EOS symbol + code := uint32(0x3fffffff) + nbits := uint8(30) + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t } - // case 4: - y := uint32(x) - return append(dst, byte(y>>24), byte(y>>16), byte(y>>8), byte(y)) + + return dst } // HuffmanEncodeLength returns the number of bytes required to encode @@ -224,3 +199,35 @@ func HuffmanEncodeLength(s string) uint64 { } return (n + 7) / 8 } + +// appendByteToHuffmanCode appends Huffman code for c to dst and +// returns the extended buffer and the remaining bits in the last +// element. The appending is not byte aligned and the remaining bits +// in the last element of dst is given in rembits. +func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { + code := huffmanCodes[c] + nbits := huffmanCodeLen[c] + + for { + if rembits > nbits { + t := uint8(code << (rembits - nbits)) + dst[len(dst)-1] |= t + rembits -= nbits + break + } + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + + nbits -= rembits + rembits = 8 + + if nbits == 0 { + break + } + + dst = append(dst, 0) + } + + return dst, rembits +} diff --git a/mantle/vendor/golang.org/x/net/http2/http2.go b/mantle/vendor/golang.org/x/net/http2/http2.go index 479ba4b2..5571ccfd 100644 --- a/mantle/vendor/golang.org/x/net/http2/http2.go +++ b/mantle/vendor/golang.org/x/net/http2/http2.go @@ -13,6 +13,7 @@ // See https://http2.github.io/ for more information on HTTP/2. // // See https://http2.golang.org/ for a test server running this code. +// package http2 // import "golang.org/x/net/http2" import ( @@ -175,11 +176,10 @@ func (s SettingID) String() string { // name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: -// -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. 
" +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " func validWireHeaderFieldName(v string) bool { if len(v) == 0 { return false @@ -365,8 +365,8 @@ func (s *sorter) SortStrings(ss []string) { // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. diff --git a/mantle/vendor/golang.org/x/net/http2/server.go b/mantle/vendor/golang.org/x/net/http2/server.go index 47524a61..c67e9b7f 100644 --- a/mantle/vendor/golang.org/x/net/http2/server.go +++ b/mantle/vendor/golang.org/x/net/http2/server.go @@ -315,20 +315,6 @@ type ServeConnOpts struct { // requests. If nil, BaseConfig.Handler is used. If BaseConfig // or BaseConfig.Handler is nil, http.DefaultServeMux is used. Handler http.Handler - - // UpgradeRequest is an initial request received on a connection - // undergoing an h2c upgrade. The request body must have been - // completely read from the connection before calling ServeConn, - // and the 101 Switching Protocols response written. - UpgradeRequest *http.Request - - // Settings is the decoded contents of the HTTP2-Settings header - // in an h2c upgrade request. - Settings []byte - - // SawClientPreface is set if the HTTP/2 connection preface - // has already been read from the connection. 
- SawClientPreface bool } func (o *ServeConnOpts) context() context.Context { @@ -397,7 +383,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { headerTableSize: initialHeaderTableSize, serveG: newGoroutineLock(), pushEnabled: true, - sawClientPreface: opts.SawClientPreface, } s.state.registerConn(sc) @@ -415,7 +400,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { if s.NewWriteScheduler != nil { sc.writeSched = s.NewWriteScheduler() } else { - sc.writeSched = NewPriorityWriteScheduler(nil) + sc.writeSched = NewRandomWriteScheduler() } // These start at the RFC-specified defaults. If there is a higher @@ -480,27 +465,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { } } - if opts.Settings != nil { - fr := &SettingsFrame{ - FrameHeader: FrameHeader{valid: true}, - p: opts.Settings, - } - if err := fr.ForeachSetting(sc.processSetting); err != nil { - sc.rejectConn(ErrCodeProtocol, "invalid settings") - return - } - opts.Settings = nil - } - if hook := testHookGetServerConn; hook != nil { hook(sc) } - - if opts.UpgradeRequest != nil { - sc.upgradeRequest(opts.UpgradeRequest) - opts.UpgradeRequest = nil - } - sc.serve() } @@ -545,7 +512,6 @@ type serverConn struct { // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() pushEnabled bool - sawClientPreface bool // preface has already been read, used in h2c upgrade sawFirstSettings bool // got the initial SETTINGS frame after the preface needToSendSettingsAck bool unackedSettings int // how many SETTINGS have we sent without ACKs? @@ -753,15 +719,7 @@ func (sc *serverConn) canonicalHeader(v string) string { sc.canonHeader = make(map[string]string) } cv = http.CanonicalHeaderKey(v) - // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of - // entries in the canonHeader cache. 
This should be larger than the number - // of unique, uncommon header keys likely to be sent by the peer, while not - // so high as to permit unreasonable memory usage if the peer sends an unbounded - // number of unique header keys. - const maxCachedCanonicalHeaders = 32 - if len(sc.canonHeader) < maxCachedCanonicalHeaders { - sc.canonHeader[v] = cv - } + sc.canonHeader[v] = cv return cv } @@ -1008,9 +966,6 @@ var errPrefaceTimeout = errors.New("timeout waiting for client preface") // returns errPrefaceTimeout on timeout, or an error if the greeting // is invalid. func (sc *serverConn) readPreface() error { - if sc.sawClientPreface { - return nil - } errc := make(chan error, 1) go func() { // Read the client preface @@ -1952,26 +1907,6 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { return nil } -func (sc *serverConn) upgradeRequest(req *http.Request) { - sc.serveG.check() - id := uint32(1) - sc.maxClientStreamID = id - st := sc.newStream(id, 0, stateHalfClosedRemote) - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - rw := sc.newResponseWriter(st, req) - - // Disable any read deadline set by the net/http package - // prior to the upgrade. 
- if sc.hs.ReadTimeout != 0 { - sc.conn.SetReadDeadline(time.Time{}) - } - - go sc.runHandler(rw, req, sc.handler.ServeHTTP) -} - func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { sc := st.sc sc.serveG.check() @@ -2202,11 +2137,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r } req = req.WithContext(st.ctx) - rw := sc.newResponseWriter(st, req) - return rw, req, nil -} - -func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *responseWriter { rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw *rws = responseWriterState{} // zero all the fields @@ -2215,7 +2145,10 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response rws.bw.Reset(chunkWriter{rws}) rws.stream = st rws.req = req - return &responseWriter{rws: rws} + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil } // Run on its own goroutine. @@ -2375,18 +2308,17 @@ type requestBody struct { _ incomparable stream *stream conn *serverConn - closeOnce sync.Once // for use by Close only - sawEOF bool // for use by Read only - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue } func (b *requestBody) Close() error { - b.closeOnce.Do(func() { - if b.pipe != nil { - b.pipe.BreakWithError(errClosedBody) - } - }) + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true return nil } @@ -2430,6 +2362,7 @@ type responseWriterState struct { // immutable within a request: stream *stream req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't conn *serverConn // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc @@ 
-2605,9 +2538,8 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { // prior to the headers being written. If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: -// -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers +// https://golang.org/pkg/net/http/#ResponseWriter +// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // promoteUndeclaredTrailers permits http.Handlers to set trailers @@ -2703,7 +2635,8 @@ func checkWriteHeaderCode(code int) { // Issue 22880: require valid WriteHeader status codes. // For now we only enforce that it's three digits. // In the future we might block things over 599 (600 and above aren't defined - // at http://httpwg.org/specs/rfc7231.html#status.codes). + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). // But for now any three digits. 
// // We used to send "HTTP/1.1 000 0" on the wire in responses but there's @@ -2724,41 +2657,13 @@ func (w *responseWriter) WriteHeader(code int) { } func (rws *responseWriterState) writeHeader(code int) { - if rws.wroteHeader { - return - } - - checkWriteHeaderCode(code) - - // Handle informational headers - if code >= 100 && code <= 199 { - // Per RFC 8297 we must not clear the current header map - h := rws.handlerHeader - - _, cl := h["Content-Length"] - _, te := h["Transfer-Encoding"] - if cl || te { - h = h.Clone() - h.Del("Content-Length") - h.Del("Transfer-Encoding") - } - - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: code, - h: h, - endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) } - - return - } - - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) } } diff --git a/mantle/vendor/golang.org/x/net/http2/transport.go b/mantle/vendor/golang.org/x/net/http2/transport.go index 4ded4dfd..b5e2ac64 100644 --- a/mantle/vendor/golang.org/x/net/http2/transport.go +++ b/mantle/vendor/golang.org/x/net/http2/transport.go @@ -16,6 +16,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "log" "math" mathrand "math/rand" @@ -500,14 +501,12 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res if req, err = shouldRetryRequest(req, err); err == nil { // After the first retry, do exponential backoff with 10% jitter. 
if retry == 0 { - t.vlogf("RoundTrip retrying after failure: %v", err) continue } backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) select { case <-time.After(time.Second * time.Duration(backoff)): - t.vlogf("RoundTrip retrying after failure: %v", err) continue case <-req.Context().Done(): err = req.Context().Err() @@ -733,13 +732,11 @@ func (cc *ClientConn) healthCheck() { // trigger the healthCheck again if there is no frame received. ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() - cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) if err != nil { - cc.vlogf("http2: Transport health check failure: %v", err) cc.closeForLostPing() - } else { - cc.vlogf("http2: Transport health check success") + cc.t.connPool().MarkDead(cc) + return } } @@ -910,24 +907,6 @@ func (cc *ClientConn) onIdleTimeout() { cc.closeIfIdle() } -func (cc *ClientConn) closeConn() error { - t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) - defer t.Stop() - return cc.tconn.Close() -} - -// A tls.Conn.Close can hang for a long time if the peer is unresponsive. -// Try to shut it down more aggressively. 
-func (cc *ClientConn) forceCloseConn() { - tc, ok := cc.tconn.(*tls.Conn) - if !ok { - return - } - if nc := tlsUnderlyingConn(tc); nc != nil { - nc.Close() - } -} - func (cc *ClientConn) closeIfIdle() { cc.mu.Lock() if len(cc.streams) > 0 || cc.streamsReserved > 0 { @@ -942,7 +921,7 @@ func (cc *ClientConn) closeIfIdle() { if VerboseLogs { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) } - cc.closeConn() + cc.tconn.Close() } func (cc *ClientConn) isDoNotReuseAndIdle() bool { @@ -959,7 +938,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { return err } // Wait for all in-flight streams to complete or connection to close - done := make(chan struct{}) + done := make(chan error, 1) cancelled := false // guarded by cc.mu go func() { cc.mu.Lock() @@ -967,7 +946,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { for { if len(cc.streams) == 0 || cc.closed { cc.closed = true - close(done) + done <- cc.tconn.Close() break } if cancelled { @@ -978,8 +957,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { }() shutdownEnterWaitStateHook() select { - case <-done: - return cc.closeConn() + case err := <-done: + return err case <-ctx.Done(): cc.mu.Lock() // Free the goroutine above @@ -1022,9 +1001,9 @@ func (cc *ClientConn) closeForError(err error) error { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() - cc.mu.Unlock() - return cc.closeConn() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + return cc.tconn.Close() } // Close closes the client connection immediately. @@ -1145,49 +1124,36 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { } } - handleResponseHeaders := func() (*http.Response, error) { - res := cs.res - if res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. 
If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - cs.abortRequestBodyWrite() - } - res.Request = req - res.TLS = cc.tlsState - if res.Body == noBody && actualContentLength(req) == 0 { - // If there isn't a request or response body still being - // written, then wait for the stream to be closed before - // RoundTrip returns. - if err := waitDone(); err != nil { - return nil, err - } - } - return res, nil - } - for { select { case <-cs.respHeaderRecv: - return handleResponseHeaders() - case <-cs.abort: - select { - case <-cs.respHeaderRecv: - // If both cs.respHeaderRecv and cs.abort are signaling, - // pick respHeaderRecv. The server probably wrote the - // response and immediately reset the stream. - // golang.org/issue/49645 - return handleResponseHeaders() - default: - waitDone() - return nil, cs.abortErr + res := cs.res + if res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + cs.abortRequestBodyWrite() } + res.Request = req + res.TLS = cc.tlsState + if res.Body == noBody && actualContentLength(req) == 0 { + // If there isn't a request or response body still being + // written, then wait for the stream to be closed before + // RoundTrip returns. 
+ if err := waitDone(); err != nil { + return nil, err + } + } + return res, nil + case <-cs.abort: + waitDone() + return nil, cs.abortErr case <-ctx.Done(): err := ctx.Err() cs.abortStream(err) @@ -1273,12 +1239,12 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } continueTimeout := cc.t.expectContinueTimeout() - if continueTimeout != 0 { - if !httpguts.HeaderValuesContainsToken(req.Header["Expect"], "100-continue") { - continueTimeout = 0 - } else { - cs.on100 = make(chan struct{}, 1) - } + if continueTimeout != 0 && + !httpguts.HeaderValuesContainsToken( + req.Header["Expect"], + "100-continue") { + continueTimeout = 0 + cs.on100 = make(chan struct{}, 1) } // Past this point (where we send request headers), it is possible for @@ -1347,7 +1313,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { case <-respHeaderTimer: return errTimeout case <-respHeaderRecv: - respHeaderRecv = nil respHeaderTimer = nil // keep waiting for END_STREAM case <-cs.abort: return cs.abortErr @@ -1769,8 +1734,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } for _, v := range vv { if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. 
- return nil, fmt.Errorf("invalid HTTP header value for header %q", k) + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) } } } @@ -2000,7 +1964,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2) } cc.closed = true - defer cc.closeConn() + defer cc.tconn.Close() } cc.mu.Unlock() @@ -2047,8 +2011,8 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) - defer cc.closeConn() + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) defer close(cc.readerDone) if cc.idleTimer != nil { @@ -2903,12 +2867,7 @@ func (t *Transport) logf(format string, args ...interface{}) { log.Printf(format, args...) } -var noBody io.ReadCloser = noBodyReader{} - -type noBodyReader struct{} - -func (noBodyReader) Close() error { return nil } -func (noBodyReader) Read([]byte) (int, error) { return 0, io.EOF } +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) type missingBody struct{} diff --git a/mantle/vendor/golang.org/x/net/http2/writesched.go b/mantle/vendor/golang.org/x/net/http2/writesched.go index c7cd0017..f24d2b1e 100644 --- a/mantle/vendor/golang.org/x/net/http2/writesched.go +++ b/mantle/vendor/golang.org/x/net/http2/writesched.go @@ -32,8 +32,7 @@ type WriteScheduler interface { // Pop dequeues the next frame to write. Returns false if no frames can // be written. Frames with a given wr.StreamID() are Pop'd in the same - // order they are Push'd, except RST_STREAM frames. No frames should be - // discarded except by CloseStream. + // order they are Push'd. No frames should be discarded except by CloseStream. Pop() (wr FrameWriteRequest, ok bool) } @@ -53,7 +52,6 @@ type FrameWriteRequest struct { // stream is the stream on which this frame will be written. // nil for non-stream frames like PING and SETTINGS. 
- // nil for RST_STREAM streams, which use the StreamError.StreamID field instead. stream *stream // done, if non-nil, must be a buffered channel with space for diff --git a/mantle/vendor/golang.org/x/net/http2/writesched_priority.go b/mantle/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c66..2618b2c1 100644 --- a/mantle/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/mantle/vendor/golang.org/x/net/http2/writesched_priority.go @@ -383,15 +383,16 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { var n *priorityNode - if wr.isControl() { + if id := wr.StreamID(); id == 0 { n = &ws.root } else { - id := wr.StreamID() n = ws.nodes[id] if n == nil { // id is an idle or closed stream. wr should not be a HEADERS or - // DATA frame. In other case, we push wr onto the root, rather - // than creating a new priorityNode. + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
if wr.DataSize() > 0 { panic("add DATA on non-open stream") } diff --git a/mantle/vendor/golang.org/x/net/http2/writesched_random.go b/mantle/vendor/golang.org/x/net/http2/writesched_random.go index f2e55e05..9a7b9e58 100644 --- a/mantle/vendor/golang.org/x/net/http2/writesched_random.go +++ b/mantle/vendor/golang.org/x/net/http2/writesched_random.go @@ -45,11 +45,11 @@ func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityP } func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { - if wr.isControl() { + id := wr.StreamID() + if id == 0 { ws.zero.push(wr) return } - id := wr.StreamID() q, ok := ws.sq[id] if !ok { q = ws.queuePool.get() @@ -59,7 +59,7 @@ func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { } func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { - // Control and RST_STREAM frames first. + // Control frames first. if !ws.zero.empty() { return ws.zero.shift(), true } diff --git a/mantle/vendor/golang.org/x/net/idna/trieval.go b/mantle/vendor/golang.org/x/net/idna/trieval.go index 9c070a44..7a8cf889 100644 --- a/mantle/vendor/golang.org/x/net/idna/trieval.go +++ b/mantle/vendor/golang.org/x/net/idna/trieval.go @@ -17,23 +17,23 @@ package idna // // The per-rune values have the following format: // -// if mapped { -// if inlinedXOR { -// 15..13 inline XOR marker -// 12..11 unused -// 10..3 inline XOR mask -// } else { -// 15..3 index into xor or mapping table -// } -// } else { -// 15..14 unused -// 13 mayNeedNorm -// 12..11 attributes -// 10..8 joining type -// 7..3 category type -// } -// 2 use xor pattern -// 1..0 mapped category +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category // // See the definitions below for a 
more detailed description of the various // bits. diff --git a/mantle/vendor/golang.org/x/net/publicsuffix/list.go b/mantle/vendor/golang.org/x/net/publicsuffix/list.go deleted file mode 100644 index e2fddd64..00000000 --- a/mantle/vendor/golang.org/x/net/publicsuffix/list.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// Package publicsuffix provides a public suffix list based on data from -// https://publicsuffix.org/ -// -// A public suffix is one under which Internet users can directly register -// names. It is related to, but different from, a TLD (top level domain). -// -// "com" is a TLD (top level domain). Top level means it has no dots. -// -// "com" is also a public suffix. Amazon and Google have registered different -// siblings under that domain: "amazon.com" and "google.com". -// -// "au" is another TLD, again because it has no dots. But it's not "amazon.au". -// Instead, it's "amazon.com.au". -// -// "com.au" isn't an actual TLD, because it's not at the top level (it has -// dots). But it is an eTLD (effective TLD), because that's the branching point -// for domain name registrars. -// -// Another name for "an eTLD" is "a public suffix". Often, what's more of -// interest is the eTLD+1, or one more label than the public suffix. For -// example, browsers partition read/write access to HTTP cookies according to -// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from -// "google.com.au", but web pages served from "maps.google.com" can share -// cookies from "www.google.com", so you don't have to sign into Google Maps -// separately from signing into Google Web Search. Note that all four of those -// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, -// the last two are not (but share the same eTLD+1: "google.com"). 
-// -// All of these domains have the same eTLD+1: -// - "www.books.amazon.co.uk" -// - "books.amazon.co.uk" -// - "amazon.co.uk" -// -// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". -// -// There is no closed form algorithm to calculate the eTLD of a domain. -// Instead, the calculation is data driven. This package provides a -// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at -// https://publicsuffix.org/ -package publicsuffix // import "golang.org/x/net/publicsuffix" - -// TODO: specify case sensitivity and leading/trailing dot behavior for -// func PublicSuffix and func EffectiveTLDPlusOne. - -import ( - "fmt" - "net/http/cookiejar" - "strings" -) - -// List implements the cookiejar.PublicSuffixList interface by calling the -// PublicSuffix function. -var List cookiejar.PublicSuffixList = list{} - -type list struct{} - -func (list) PublicSuffix(domain string) string { - ps, _ := PublicSuffix(domain) - return ps -} - -func (list) String() string { - return version -} - -// PublicSuffix returns the public suffix of the domain using a copy of the -// publicsuffix.org database compiled into the library. -// -// icann is whether the public suffix is managed by the Internet Corporation -// for Assigned Names and Numbers. If not, the public suffix is either a -// privately managed domain (and in practice, not a top level domain) or an -// unmanaged top level domain (and not explicitly mentioned in the -// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN -// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and -// "cromulent" is an unmanaged top level domain. 
-// -// Use cases for distinguishing ICANN domains like "foo.com" from private -// domains like "foo.appspot.com" can be found at -// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases -func PublicSuffix(domain string) (publicSuffix string, icann bool) { - lo, hi := uint32(0), uint32(numTLD) - s, suffix, icannNode, wildcard := domain, len(domain), false, false -loop: - for { - dot := strings.LastIndex(s, ".") - if wildcard { - icann = icannNode - suffix = 1 + dot - } - if lo == hi { - break - } - f := find(s[1+dot:], lo, hi) - if f == notFound { - break - } - - u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) - icannNode = u&(1<>= nodesBitsICANN - u = children[u&(1<>= childrenBitsLo - hi = u & (1<>= childrenBitsHi - switch u & (1<>= childrenBitsNodeType - wildcard = u&(1<>= nodesBitsTextLength - offset := x & (1<(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/mantle/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/mantle/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index f3cde129..f8c484f5 100644 --- a/mantle/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/mantle/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !linux && !netbsd && !openbsd && arm64 -// +build !linux,!netbsd,!openbsd,arm64 +//go:build !linux && !netbsd && arm64 +// +build !linux,!netbsd,arm64 package cpu diff --git a/mantle/vendor/golang.org/x/sys/unix/mkall.sh b/mantle/vendor/golang.org/x/sys/unix/mkall.sh index 6fc18353..dcef4de6 100644 --- a/mantle/vendor/golang.org/x/sys/unix/mkall.sh +++ b/mantle/vendor/golang.org/x/sys/unix/mkall.sh @@ -73,12 +73,12 @@ aix_ppc64) darwin_amd64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm.go" + mkasm="go run mkasm_darwin.go" ;; darwin_arm64) mkerrors="$mkerrors -m64" mktypes="GOARCH=$GOARCH go tool cgo -godefs" - mkasm="go run mkasm.go" + mkasm="go run mkasm_darwin.go" ;; dragonfly_amd64) mkerrors="$mkerrors -m64" @@ -142,17 +142,17 @@ netbsd_arm64) mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_386) - mkasm="go run mkasm.go" mkerrors="$mkerrors -m32" - mksyscall="go run mksyscall.go -l32 -openbsd -libc" + mksyscall="go run mksyscall.go -l32 -openbsd" mksysctl="go run mksysctl_openbsd.go" + mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_amd64) - mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd -libc" + mksyscall="go run mksyscall.go -openbsd" mksysctl="go run mksysctl_openbsd.go" + mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; openbsd_arm) @@ -165,10 +165,10 @@ openbsd_arm) mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; openbsd_arm64) - mkasm="go run mkasm.go" mkerrors="$mkerrors -m64" - mksyscall="go run mksyscall.go -openbsd -libc" + mksyscall="go run mksyscall.go -openbsd" mksysctl="go run mksysctl_openbsd.go" + mksysnum="go run mksysnum.go 
'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'" # Let the type of C char be signed for making the bare syscall # API consistent across platforms. mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" @@ -232,5 +232,5 @@ esac if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi - if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi + if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi ) | $run diff --git a/mantle/vendor/golang.org/x/sys/unix/syscall_aix.go b/mantle/vendor/golang.org/x/sys/unix/syscall_aix.go index 2db1b51e..ac579c60 100644 --- a/mantle/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/mantle/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -218,62 +218,13 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { } func recvmsgRaw(fd int, iov []Iovec, oob []byte, flags int, rsa *RawSockaddrAny) (n, oobn int, recvflags int, err error) { - var msg Msghdr - msg.Name = (*byte)(unsafe.Pointer(rsa)) - msg.Namelen = uint32(SizeofSockaddrAny) - var dummy byte - if len(oob) > 0 { - // receive at least one normal byte - if emptyIovecs(iov) { - var iova [1]Iovec - iova[0].Base = &dummy - iova[0].SetLen(1) - iov = iova[:] - } - msg.Control = (*byte)(unsafe.Pointer(&oob[0])) - msg.SetControllen(len(oob)) - } - if len(iov) > 0 { - msg.Iov = &iov[0] - msg.SetIovlen(len(iov)) - } - if n, err = recvmsg(fd, &msg, flags); n == -1 { - return - } - oobn = int(msg.Controllen) - recvflags = int(msg.Flags) - return + // Recvmsg not implemented on AIX + return -1, -1, -1, ENOSYS } func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Socklen, flags int) (n int, err error) { - var msg Msghdr - msg.Name = (*byte)(unsafe.Pointer(ptr)) - msg.Namelen = uint32(salen) - var dummy byte - var empty bool - if len(oob) > 0 { - // 
send at least one normal byte - empty = emptyIovecs(iov) - if empty { - var iova [1]Iovec - iova[0].Base = &dummy - iova[0].SetLen(1) - iov = iova[:] - } - msg.Control = (*byte)(unsafe.Pointer(&oob[0])) - msg.SetControllen(len(oob)) - } - if len(iov) > 0 { - msg.Iov = &iov[0] - msg.SetIovlen(len(iov)) - } - if n, err = sendmsg(fd, &msg, flags); err != nil { - return 0, err - } - if len(oob) > 0 && empty { - n = 0 - } - return n, nil + // SendmsgN not implemented on AIX + return -1, ENOSYS } func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { diff --git a/mantle/vendor/golang.org/x/sys/unix/syscall_bsd.go b/mantle/vendor/golang.org/x/sys/unix/syscall_bsd.go index eda42671..c437fc5d 100644 --- a/mantle/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/mantle/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -363,7 +363,7 @@ func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Sockle var empty bool if len(oob) > 0 { // send at least one normal byte - empty = emptyIovecs(iov) + empty := emptyIovecs(iov) if empty { var iova [1]Iovec iova[0].Base = &dummy diff --git a/mantle/vendor/golang.org/x/sys/unix/syscall_linux.go b/mantle/vendor/golang.org/x/sys/unix/syscall_linux.go index ecb0f27f..5e4a94f7 100644 --- a/mantle/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/mantle/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1541,7 +1541,7 @@ func sendmsgN(fd int, iov []Iovec, oob []byte, ptr unsafe.Pointer, salen _Sockle var dummy byte var empty bool if len(oob) > 0 { - empty = emptyIovecs(iov) + empty := emptyIovecs(iov) if empty { var sockType int sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) diff --git a/mantle/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/mantle/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go deleted file mode 100644 index e23c33de..00000000 --- a/mantle/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (openbsd && 386) || (openbsd && amd64) || (openbsd && arm64) -// +build openbsd,386 openbsd,amd64 openbsd,arm64 - -package unix - -import _ "unsafe" - -// Implemented in the runtime package (runtime/sys_openbsd3.go) -func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -//go:linkname syscall_syscall syscall.syscall -//go:linkname syscall_syscall6 syscall.syscall6 -//go:linkname syscall_syscall10 syscall.syscall10 -//go:linkname syscall_rawSyscall syscall.rawSyscall -//go:linkname syscall_rawSyscall6 syscall.rawSyscall6 - -func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) { - return syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, 0) -} diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 36c0dfc7..274e2dab 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/386/include -m32 +// mkerrors.sh -Wall -Werror -static -I/tmp/include -m32 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 4ff94270..95b6eeed 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/amd64/include -m64 +// mkerrors.sh -Wall -Werror -static -I/tmp/include -m64 // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3eaa0fb7..918cd130 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/arm/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d7995bdc..3907dc5a 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/arm64/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 928e24c2..03d5c105 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/loong64/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux // +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 179bffb4..bd794e01 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/mips/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 1fba17bd..6c741b05 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/mips64/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index b77dde31..807b8cd2 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/mips64le/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 78c6c751..a39e4f5c 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/mipsle/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 1c0d31f0..c0fcda86 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/ppc/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux // +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 959dd9bb..f3b72407 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5a873cdb..72f2a45d 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/ppc64le/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e336d141..45b214b4 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/riscv64/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 390c01d9..1897f207 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/s390x/include -fsigned-char +// mkerrors.sh -Wall -Werror -static -I/tmp/include -fsigned-char // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 98a6e5f1..1fb7a395 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -1,11 +1,11 @@ -// mkerrors.sh -Wall -Werror -static -I/tmp/sparc64/include +// mkerrors.sh -Wall -Werror -static -I/tmp/include // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go package unix diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s index f5bb40ed..d6c3e25c 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s +++ b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s @@ -1,4 +1,4 @@ -// go run mkasm.go darwin amd64 +// go run mkasm_darwin.go amd64 // Code generated by the command above; DO NOT EDIT. //go:build go1.13 diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index b41467a0..7e308a47 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -1,4 +1,4 @@ -// go run mkasm.go darwin amd64 +// go run mkasm_darwin.go amd64 // Code generated by the command above; DO NOT EDIT. //go:build go1.12 diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s index 0c3f76bc..35798972 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s +++ b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s @@ -1,4 +1,4 @@ -// go run mkasm.go darwin arm64 +// go run mkasm_darwin.go arm64 // Code generated by the command above; DO NOT EDIT. //go:build go1.13 diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index e1f9204a..b09e5bb0 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/mantle/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -1,4 +1,4 @@ -// go run mkasm.go darwin arm64 +// go run mkasm_darwin.go arm64 // Code generated by the command above; DO NOT EDIT. 
//go:build go1.12 diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 2925fe0a..a057fc5d 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -openbsd -libc -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go +// go run mksyscall.go -l32 -openbsd -tags openbsd,386 syscall_bsd.go syscall_openbsd.go syscall_openbsd_386.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && 386 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,28 +24,20 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } -var libc_getgroups_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setgroups_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), 
uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -53,14 +45,10 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } -var libc_wait4_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -68,42 +56,30 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } -var libc_accept_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_accept accept "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_bind_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_bind bind "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_connect_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_connect 
connect "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -111,94 +87,66 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } -var libc_socket_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_socket socket "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getsockopt_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setsockopt_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), 
uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getpeername_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getsockname_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_shutdown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_socketpair_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from 
*RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -208,7 +156,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -216,10 +164,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } -var libc_recvfrom_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -229,21 +173,17 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sendto_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sendto sendto "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -251,14 +191,10 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err 
error) { return } -var libc_recvmsg_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -266,14 +202,10 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -var libc_sendmsg_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -281,10 +213,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -var libc_kevent_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kevent kevent "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -293,35 +221,27 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var 
libc_utimes_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_utimes utimes "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_futimes_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_futimes futimes "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -329,10 +249,6 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } -var libc_poll_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_poll poll "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -342,17 +258,13 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_madvise_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_madvise madvise "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -362,31 +274,23 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := 
Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mlock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mlock mlock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mlockall_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -396,17 +300,13 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mprotect_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -416,17 +316,13 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_msync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_msync msync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -436,45 +332,33 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := 
Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munlock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munlock munlock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munlockall_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_pipe2_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -484,7 +368,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -492,10 +376,6 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } -var libc_getdents_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getdents getdents "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -505,7 +385,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 
:= Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -513,24 +393,16 @@ func Getcwd(buf []byte) (n int, err error) { return } -var libc_getcwd_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -540,21 +412,17 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sysctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), 
uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -562,10 +430,6 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } -var libc_ppoll_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -574,31 +438,23 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_access_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_access access "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_adjtime_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -607,17 +463,13 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chdir chdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -626,17 +478,13 @@ func Chflags(path string, 
flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chflags_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chflags chflags "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -645,17 +493,13 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chmod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chmod chmod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -664,17 +508,13 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chown chown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -683,35 +523,27 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chroot_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chroot chroot "libc.so" - // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_close_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_close close "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -719,49 +551,33 @@ func Dup(fd int) (nfd int, err error) { return } -var libc_dup_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup dup "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_dup2_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_dup3_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + Syscall(SYS_EXIT, uintptr(code), 0, 0) return } -var libc_exit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_exit exit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func 
Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -770,59 +586,43 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_faccessat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchflags_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchmod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -831,31 +631,23 @@ func Fchmodat(dirfd 
int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchmodat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchown fchown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -864,35 +656,27 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchownat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_flock_trampoline_addr uintptr - 
-//go:cgo_import_dynamic libc_flock flock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -900,24 +684,16 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -var libc_fpathconf_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstat fstat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -926,99 +702,71 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstatat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), 
uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstatfs_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fsync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fsync fsync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), uintptr(length>>32)) + _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_ftruncate_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } -var libc_getegid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getegid getegid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) uid = int(r0) return } -var libc_geteuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) gid = int(r0) return } -var libc_getgid_trampoline_addr uintptr - -//go:cgo_import_dynamic 
libc_getgid getgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1026,50 +774,34 @@ func Getpgid(pid int) (pgid int, err error) { return } -var libc_getpgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) pgrp = int(r0) return } -var libc_getpgrp_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) pid = int(r0) return } -var libc_getpid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpid getpid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } -var libc_getppid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getppid getppid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1077,28 +809,20 @@ func Getpriority(which int, who int) (prio int, err error) { return } -var libc_getpriority_trampoline_addr 
uintptr - -//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0) + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1106,28 +830,20 @@ func Getrtable() (rtable int, err error) { return } -var libc_getrtable_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getrusage_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1135,66 +851,46 @@ func Getsid(pid int) (sid int, err error) { return } -var libc_getsid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsid getsid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_gettimeofday_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) return } -var libc_getuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getuid getuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) tainted = bool(r0 != 0) return } -var libc_issetugid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_kill_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kill kill "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1202,10 +898,6 @@ func Kqueue() (fd int, err error) { return } -var libc_kqueue_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -1214,17 +906,13 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_lchown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lchown lchown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -1238,17 +926,13 @@ func Link(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_link_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_link link "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -1262,31 +946,23 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_linkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_linkat linkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, 
uintptr(s), uintptr(backlog), 0) + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_listen_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_listen listen "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -1295,17 +971,13 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_lstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lstat lstat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -1314,17 +986,13 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1333,17 +1001,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkdirat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" - // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1352,17 +1016,13 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkfifo_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1371,17 +1031,13 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkfifoat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1390,17 +1046,13 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mknod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mknod mknod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1409,31 +1061,23 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := 
syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mknodat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_nanosleep_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1442,7 +1086,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1450,10 +1094,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } -var libc_open_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_open open "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1462,7 +1102,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1470,10 +1110,6 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } -var libc_openat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_openat openat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1482,7 +1118,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1490,10 +1126,6 @@ func Pathconf(path string, name int) (val int, err error) { return } -var libc_pathconf_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1503,7 +1135,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1511,10 +1143,6 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } -var libc_pread_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pread pread "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1524,7 +1152,7 @@ func 
pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1532,10 +1160,6 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } -var libc_pwrite_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1545,7 +1169,7 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1553,10 +1177,6 @@ func read(fd int, p []byte) (n int, err error) { return } -var libc_read_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_read read "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1571,7 +1191,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1579,10 +1199,6 @@ func Readlink(path string, buf []byte) (n int, err error) { return } -var libc_readlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readlink readlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1597,7 +1213,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1605,10 +1221,6 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } -var libc_readlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1622,17 +1234,13 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_rename_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_rename rename "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1646,17 +1254,13 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_renameat_trampoline_addr uintptr - -//go:cgo_import_dynamic 
libc_renameat renameat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1665,17 +1269,13 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_revoke_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_revoke revoke "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1684,21 +1284,17 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_rmdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(offset>>32), uintptr(whence), 0, 0) + r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0) newoffset = int64(int64(r1)<<32 | int64(r0)) if e1 != 0 { err = errnoErr(e1) @@ -1706,14 +1302,10 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } -var libc_lseek_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lseek lseek "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), 
uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1721,52 +1313,36 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } -var libc_select_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_select select "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setegid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setegid setegid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_seteuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setgid setgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1775,133 +1351,97 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_SETLOGIN, 
uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setlogin_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setpgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setpriority_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setregid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setregid setregid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setreuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setresgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setresuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setrtable_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) 
if e1 != 0 { err = errnoErr(e1) @@ -1909,38 +1449,26 @@ func Setsid() (pid int, err error) { return } -var libc_setsid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setsid setsid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_settimeofday_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setuid setuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1949,17 +1477,13 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_stat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_stat stat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1968,17 +1492,13 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_STATFS, 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_statfs_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_statfs statfs "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1992,17 +1512,13 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_symlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_symlink symlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { @@ -2016,31 +1532,23 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_symlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sync sync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -2049,29 +1557,21 @@ func Truncate(path string, length int64) (err error) { if 
err != nil { return } - _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), uintptr(length>>32)) + _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_truncate_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_truncate truncate "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) oldmask = int(r0) return } -var libc_umask_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_umask umask "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -2080,17 +1580,13 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unlink unlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -2099,17 +1595,13 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err 
error) { @@ -2118,17 +1610,13 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unmount_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unmount unmount "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -2138,7 +1626,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2146,14 +1634,10 @@ func write(fd int, p []byte) (n int, err error) { return } -var libc_write_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_write write "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos), uintptr(pos>>32), 0, 0) + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -2161,28 +1645,20 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } -var libc_mmap_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mmap mmap "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := 
syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munmap_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munmap munmap "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2193,7 +1669,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2209,13 +1685,9 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } - -var libc_utimensat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s deleted file mode 100644 index 75eb2f5f..00000000 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ /dev/null @@ -1,796 +0,0 @@ 
-// go run mkasm.go openbsd 386 -// Code generated by the command above; DO NOT EDIT. - -#include "textflag.h" - -TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getgroups(SB) - -GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB) - -TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setgroups(SB) - -GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB) - -TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_wait4(SB) - -GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4 -DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB) - -TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_accept(SB) - -GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4 -DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB) - -TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_bind(SB) - -GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4 -DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB) - -TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_connect(SB) - -GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4 -DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB) - -TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_socket(SB) - -GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4 -DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB) - -TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsockopt(SB) - -GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB) - -TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setsockopt(SB) - -GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB) - -TEXT 
libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpeername(SB) - -GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB) - -TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsockname(SB) - -GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB) - -TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_shutdown(SB) - -GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4 -DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB) - -TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_socketpair(SB) - -GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4 -DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB) - -TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_recvfrom(SB) - -GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4 -DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB) - -TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sendto(SB) - -GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4 -DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB) - -TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_recvmsg(SB) - -GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4 -DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB) - -TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sendmsg(SB) - -GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4 -DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB) - -TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kevent(SB) - -GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4 -DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB) - -TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_utimes(SB) - -GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4 
-DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB) - -TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_futimes(SB) - -GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4 -DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB) - -TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_poll(SB) - -GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4 -DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB) - -TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_madvise(SB) - -GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4 -DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB) - -TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mlock(SB) - -GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB) - -TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mlockall(SB) - -GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB) - -TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mprotect(SB) - -GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB) - -TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_msync(SB) - -GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4 -DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB) - -TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munlock(SB) - -GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4 -DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB) - -TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munlockall(SB) - -GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4 -DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB) - -TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pipe2(SB) - -GLOBL 
·libc_pipe2_trampoline_addr(SB), RODATA, $4 -DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB) - -TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getdents(SB) - -GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB) - -TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getcwd(SB) - -GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB) - -TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ioctl(SB) - -GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4 -DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB) - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 -DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) - -TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ppoll(SB) - -GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 -DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB) - -TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_access(SB) - -GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4 -DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB) - -TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_adjtime(SB) - -GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4 -DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB) - -TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chdir(SB) - -GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4 -DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB) - -TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chflags(SB) - -GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4 -DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB) - -TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chmod(SB) - -GLOBL 
·libc_chmod_trampoline_addr(SB), RODATA, $4 -DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB) - -TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chown(SB) - -GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4 -DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB) - -TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chroot(SB) - -GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4 -DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB) - -TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_close(SB) - -GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4 -DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB) - -TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup(SB) - -GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4 -DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB) - -TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup2(SB) - -GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4 -DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB) - -TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup3(SB) - -GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4 -DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB) - -TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_exit(SB) - -GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB) - -TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_faccessat(SB) - -GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB) - -TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchdir(SB) - -GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB) - -TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchflags(SB) - -GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4 -DATA 
·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB) - -TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchmod(SB) - -GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB) - -TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchmodat(SB) - -GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB) - -TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchown(SB) - -GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB) - -TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchownat(SB) - -GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB) - -TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_flock(SB) - -GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4 -DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB) - -TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fpathconf(SB) - -GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB) - -TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstat(SB) - -GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB) - -TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstatat(SB) - -GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB) - -TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstatfs(SB) - -GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4 -DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB) - -TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fsync(SB) - -GLOBL ·libc_fsync_trampoline_addr(SB), 
RODATA, $4 -DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB) - -TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ftruncate(SB) - -GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4 -DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB) - -TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getegid(SB) - -GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB) - -TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_geteuid(SB) - -GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB) - -TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getgid(SB) - -GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB) - -TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpgid(SB) - -GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB) - -TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpgrp(SB) - -GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB) - -TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpid(SB) - -GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB) - -TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getppid(SB) - -GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB) - -TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpriority(SB) - -GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB) - -TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP 
libc_getrlimit(SB) - -GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB) - -TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrtable(SB) - -GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB) - -TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrusage(SB) - -GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB) - -TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsid(SB) - -GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB) - -TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) - -GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4 -DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB) - -TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getuid(SB) - -GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB) - -TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_issetugid(SB) - -GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB) - -TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kill(SB) - -GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4 -DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB) - -TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kqueue(SB) - -GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4 -DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB) - -TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lchown(SB) - -GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4 -DATA ·libc_lchown_trampoline_addr(SB)/4, 
$libc_lchown_trampoline<>(SB) - -TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_link(SB) - -GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4 -DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB) - -TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_linkat(SB) - -GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB) - -TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_listen(SB) - -GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4 -DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB) - -TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lstat(SB) - -GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB) - -TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkdir(SB) - -GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB) - -TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkdirat(SB) - -GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB) - -TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkfifo(SB) - -GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB) - -TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkfifoat(SB) - -GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB) - -TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mknod(SB) - -GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB) - -TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mknodat(SB) - -GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mknodat_trampoline_addr(SB)/4, 
$libc_mknodat_trampoline<>(SB) - -TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_nanosleep(SB) - -GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 -DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB) - -TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_open(SB) - -GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4 -DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB) - -TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_openat(SB) - -GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB) - -TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pathconf(SB) - -GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4 -DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB) - -TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pread(SB) - -GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4 -DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB) - -TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pwrite(SB) - -GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4 -DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB) - -TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_read(SB) - -GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4 -DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB) - -TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readlink(SB) - -GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4 -DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB) - -TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readlinkat(SB) - -GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB) - -TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_rename(SB) - -GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4 -DATA 
·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB) - -TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_renameat(SB) - -GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB) - -TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_revoke(SB) - -GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4 -DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB) - -TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_rmdir(SB) - -GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4 -DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB) - -TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lseek(SB) - -GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4 -DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB) - -TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_select(SB) - -GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4 -DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB) - -TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setegid(SB) - -GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB) - -TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_seteuid(SB) - -GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB) - -TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setgid(SB) - -GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB) - -TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setlogin(SB) - -GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB) - -TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setpgid(SB) - -GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4 
-DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB) - -TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setpriority(SB) - -GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB) - -TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setregid(SB) - -GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB) - -TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setreuid(SB) - -GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB) - -TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setresgid(SB) - -GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB) - -TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setresuid(SB) - -GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB) - -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB) - -TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrtable(SB) - -GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB) - -TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setsid(SB) - -GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB) - -TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_settimeofday(SB) - -GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4 -DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB) - -TEXT 
libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setuid(SB) - -GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4 -DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB) - -TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_stat(SB) - -GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB) - -TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_statfs(SB) - -GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4 -DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB) - -TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_symlink(SB) - -GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4 -DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB) - -TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_symlinkat(SB) - -GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB) - -TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sync(SB) - -GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4 -DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB) - -TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_truncate(SB) - -GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4 -DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB) - -TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_umask(SB) - -GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4 -DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB) - -TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unlink(SB) - -GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4 -DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB) - -TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unlinkat(SB) - -GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB) - -TEXT 
libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unmount(SB) - -GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4 -DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB) - -TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_write(SB) - -GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4 -DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB) - -TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mmap(SB) - -GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4 -DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB) - -TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munmap(SB) - -GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 -DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) - -TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_utimensat(SB) - -GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 -DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 98446d2b..04db8fa2 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -libc -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go +// go run mksyscall.go -openbsd -tags openbsd,amd64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && amd64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,28 +24,20 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } -var libc_getgroups_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setgroups_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -53,14 +45,10 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } -var libc_wait4_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := 
syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -68,42 +56,30 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } -var libc_accept_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_accept accept "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_bind_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_bind bind "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_connect_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_connect connect "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -111,94 +87,66 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } -var libc_socket_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_socket socket "libc.so" - // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getsockopt_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setsockopt_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getpeername_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + 
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getsockname_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_shutdown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_socketpair_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -208,7 +156,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -216,10 +164,6 @@ func recvfrom(fd 
int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } -var libc_recvfrom_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -229,21 +173,17 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sendto_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sendto sendto "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -251,14 +191,10 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -var libc_recvmsg_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -266,14 +202,10 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -var 
libc_sendmsg_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -281,10 +213,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -var libc_kevent_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kevent kevent "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -293,35 +221,27 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_utimes_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_utimes utimes "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_futimes_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_futimes futimes "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -329,10 +249,6 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } -var libc_poll_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_poll poll "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -342,17 +258,13 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_madvise_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_madvise madvise "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -362,31 +274,23 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mlock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mlock mlock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mlockall_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" - // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -396,17 +300,13 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mprotect_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -416,17 +316,13 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_msync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_msync msync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -436,45 +332,33 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munlock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munlock munlock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munlockall_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_pipe2_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -484,7 +368,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -492,10 +376,6 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } -var libc_getdents_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getdents getdents "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -505,7 +385,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -513,24 +393,16 @@ func Getcwd(buf []byte) (n int, err error) { return } -var libc_getcwd_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } 
-var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -540,21 +412,17 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sysctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -562,10 +430,6 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } -var libc_ppoll_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -574,31 +438,23 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 
:= Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_access_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_access access "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_adjtime_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -607,17 +463,13 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chdir chdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -626,17 +478,13 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chflags_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chflags chflags "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -645,17 +493,13 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := 
syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chmod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chmod chmod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -664,17 +508,13 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chown chown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -683,35 +523,27 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chroot_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chroot chroot "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_close_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_close close "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 
0 { err = errnoErr(e1) @@ -719,49 +551,33 @@ func Dup(fd int) (nfd int, err error) { return } -var libc_dup_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup dup "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_dup2_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_dup3_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + Syscall(SYS_EXIT, uintptr(code), 0, 0) return } -var libc_exit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_exit exit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -770,59 +586,43 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_faccessat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" - // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchflags_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchmod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -831,31 +631,23 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchmodat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 
:= syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchown fchown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -864,35 +656,27 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchownat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_flock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_flock flock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -900,24 +684,16 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -var libc_fpathconf_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstat fstat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -926,99 +702,71 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstatat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstatfs_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fsync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fsync fsync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, 
_, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_ftruncate_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } -var libc_getegid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getegid getegid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) uid = int(r0) return } -var libc_geteuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) gid = int(r0) return } -var libc_getgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getgid getgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1026,50 +774,34 @@ func Getpgid(pid int) (pgid int, err error) { return } -var libc_getpgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPGRP, 
0, 0, 0) pgrp = int(r0) return } -var libc_getpgrp_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) pid = int(r0) return } -var libc_getpid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpid getpid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } -var libc_getppid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getppid getppid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1077,28 +809,20 @@ func Getpriority(which int, who int) (prio int, err error) { return } -var libc_getpriority_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 
0, 0, 0) + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1106,28 +830,20 @@ func Getrtable() (rtable int, err error) { return } -var libc_getrtable_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getrusage_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1135,66 +851,46 @@ func Getsid(pid int) (sid int, err error) { return } -var libc_getsid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsid getsid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_gettimeofday_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) return } -var libc_getuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getuid 
getuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) tainted = bool(r0 != 0) return } -var libc_issetugid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_kill_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kill kill "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1202,10 +898,6 @@ func Kqueue() (fd int, err error) { return } -var libc_kqueue_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -1214,17 +906,13 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_lchown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lchown lchown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -1238,17 +926,13 @@ func Link(path string, link string) (err error) { if err != nil { 
return } - _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_link_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_link link "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -1262,31 +946,23 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_linkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_linkat linkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_listen_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_listen listen "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -1295,17 +971,13 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var 
libc_lstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lstat lstat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -1314,17 +986,13 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1333,17 +1001,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkdirat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1352,17 +1016,13 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkfifo_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1371,17 +1031,13 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err 
error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkfifoat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1390,17 +1046,13 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mknod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mknod mknod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1409,31 +1061,23 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mknodat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), 
uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_nanosleep_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1442,7 +1086,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1450,10 +1094,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } -var libc_open_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_open open "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1462,7 +1102,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1470,10 +1110,6 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } -var libc_openat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_openat openat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1482,7 +1118,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, 
uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1490,10 +1126,6 @@ func Pathconf(path string, name int) (val int, err error) { return } -var libc_pathconf_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1503,7 +1135,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1511,10 +1143,6 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } -var libc_pread_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pread pread "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1524,7 +1152,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1532,10 +1160,6 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } -var libc_pwrite_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1545,7 +1169,7 @@ func read(fd int, p []byte) (n int, err 
error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1553,10 +1177,6 @@ func read(fd int, p []byte) (n int, err error) { return } -var libc_read_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_read read "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1571,7 +1191,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1579,10 +1199,6 @@ func Readlink(path string, buf []byte) (n int, err error) { return } -var libc_readlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readlink readlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1597,7 +1213,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1605,10 +1221,6 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } -var libc_readlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" - // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1622,17 +1234,13 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_rename_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_rename rename "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1646,17 +1254,13 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_renameat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_renameat renameat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1665,17 +1269,13 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_revoke_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_revoke revoke "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1684,21 +1284,17 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, 
uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_rmdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1706,14 +1302,10 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } -var libc_lseek_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lseek lseek "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1721,52 +1313,36 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } -var libc_select_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_select select "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setegid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setegid setegid 
"libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_seteuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setgid setgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1775,133 +1351,97 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setlogin_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setpgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := Syscall(SYS_SETPRIORITY, 
uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setpriority_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setregid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setregid setregid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setreuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setresgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setresuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" 
- // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setrtable_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1909,38 +1449,26 @@ func Setsid() (pid int, err error) { return } -var libc_setsid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setsid setsid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_settimeofday_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETUID, 
uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setuid setuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1949,17 +1477,13 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_stat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_stat stat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1968,17 +1492,13 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_statfs_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_statfs statfs "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1992,17 +1512,13 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_symlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_symlink symlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, 
newdirfd int, newpath string) (err error) { @@ -2016,31 +1532,23 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_symlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sync sync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -2049,29 +1557,21 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_truncate_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_truncate truncate "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) oldmask = int(r0) return } -var libc_umask_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_umask umask "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -2080,17 +1580,13 @@ func Unlink(path string) 
(err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unlink unlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -2099,17 +1595,13 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -2118,17 +1610,13 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unmount_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unmount unmount "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -2138,7 +1626,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2146,14 +1634,10 @@ func write(fd 
int, p []byte) (n int, err error) { return } -var libc_write_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_write write "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -2161,28 +1645,20 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } -var libc_mmap_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mmap mmap "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munmap_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munmap munmap "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2193,7 +1669,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := 
Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2209,13 +1685,9 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } - -var libc_utimensat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s deleted file mode 100644 index 243a6663..00000000 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ /dev/null @@ -1,796 +0,0 @@ -// go run mkasm.go openbsd amd64 -// Code generated by the command above; DO NOT EDIT. 
- -#include "textflag.h" - -TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getgroups(SB) - -GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) - -TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setgroups(SB) - -GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) - -TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_wait4(SB) - -GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 -DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) - -TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_accept(SB) - -GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 -DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) - -TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_bind(SB) - -GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 -DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) - -TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_connect(SB) - -GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 -DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) - -TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_socket(SB) - -GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 -DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) - -TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsockopt(SB) - -GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) - -TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setsockopt(SB) - -GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) - -TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpeername(SB) - -GLOBL ·libc_getpeername_trampoline_addr(SB), 
RODATA, $8 -DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) - -TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsockname(SB) - -GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) - -TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_shutdown(SB) - -GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) - -TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_socketpair(SB) - -GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 -DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) - -TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_recvfrom(SB) - -GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 -DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) - -TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sendto(SB) - -GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) - -TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_recvmsg(SB) - -GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 -DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) - -TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sendmsg(SB) - -GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) - -TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kevent(SB) - -GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 -DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) - -TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_utimes(SB) - -GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 -DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) - -TEXT 
libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_futimes(SB) - -GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 -DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) - -TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_poll(SB) - -GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 -DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) - -TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_madvise(SB) - -GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 -DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) - -TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mlock(SB) - -GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) - -TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mlockall(SB) - -GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) - -TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mprotect(SB) - -GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) - -TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_msync(SB) - -GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 -DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) - -TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munlock(SB) - -GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 -DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) - -TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munlockall(SB) - -GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 -DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) - -TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pipe2(SB) - -GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pipe2_trampoline_addr(SB)/8, 
$libc_pipe2_trampoline<>(SB) - -TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getdents(SB) - -GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) - -TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getcwd(SB) - -GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) - -TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ioctl(SB) - -GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) - -TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ppoll(SB) - -GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 -DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) - -TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_access(SB) - -GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 -DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) - -TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_adjtime(SB) - -GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 -DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) - -TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chdir(SB) - -GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) - -TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chflags(SB) - -GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) - -TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chmod(SB) - -GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chmod_trampoline_addr(SB)/8, 
$libc_chmod_trampoline<>(SB) - -TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chown(SB) - -GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) - -TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chroot(SB) - -GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) - -TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_close(SB) - -GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 -DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) - -TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup(SB) - -GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 -DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) - -TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup2(SB) - -GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 -DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) - -TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup3(SB) - -GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 -DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) - -TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_exit(SB) - -GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) - -TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_faccessat(SB) - -GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) - -TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchdir(SB) - -GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) - -TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchflags(SB) - -GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) - -TEXT 
libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchmod(SB) - -GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) - -TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchmodat(SB) - -GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) - -TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchown(SB) - -GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) - -TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchownat(SB) - -GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) - -TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_flock(SB) - -GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 -DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) - -TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fpathconf(SB) - -GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) - -TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstat(SB) - -GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) - -TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstatat(SB) - -GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) - -TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstatfs(SB) - -GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) - -TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fsync(SB) - -GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fsync_trampoline_addr(SB)/8, 
$libc_fsync_trampoline<>(SB) - -TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ftruncate(SB) - -GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 -DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) - -TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getegid(SB) - -GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) - -TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_geteuid(SB) - -GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) - -TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getgid(SB) - -GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) - -TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpgid(SB) - -GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) - -TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpgrp(SB) - -GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) - -TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpid(SB) - -GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) - -TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getppid(SB) - -GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) - -TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpriority(SB) - -GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) - -TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrlimit(SB) - -GLOBL ·libc_getrlimit_trampoline_addr(SB), 
RODATA, $8 -DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) - -TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrtable(SB) - -GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) - -TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrusage(SB) - -GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) - -TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsid(SB) - -GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) - -TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) - -GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 -DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) - -TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getuid(SB) - -GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) - -TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_issetugid(SB) - -GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) - -TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kill(SB) - -GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 -DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) - -TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kqueue(SB) - -GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 -DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) - -TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lchown(SB) - -GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) - -TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 - JMP 
libc_link(SB) - -GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 -DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) - -TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_linkat(SB) - -GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) - -TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_listen(SB) - -GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 -DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) - -TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lstat(SB) - -GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) - -TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkdir(SB) - -GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) - -TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkdirat(SB) - -GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) - -TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkfifo(SB) - -GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) - -TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkfifoat(SB) - -GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) - -TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mknod(SB) - -GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) - -TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mknodat(SB) - -GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) - -TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_nanosleep(SB) 
- -GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 -DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) - -TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_open(SB) - -GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 -DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) - -TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_openat(SB) - -GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) - -TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pathconf(SB) - -GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) - -TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pread(SB) - -GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) - -TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pwrite(SB) - -GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) - -TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_read(SB) - -GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 -DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) - -TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readlink(SB) - -GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) - -TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readlinkat(SB) - -GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) - -TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_rename(SB) - -GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 -DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) - -TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 - JMP 
libc_renameat(SB) - -GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) - -TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_revoke(SB) - -GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 -DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) - -TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_rmdir(SB) - -GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) - -TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lseek(SB) - -GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 -DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) - -TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_select(SB) - -GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 -DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) - -TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setegid(SB) - -GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) - -TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_seteuid(SB) - -GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) - -TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setgid(SB) - -GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) - -TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setlogin(SB) - -GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) - -TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setpgid(SB) - -GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) - -TEXT 
libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setpriority(SB) - -GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) - -TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setregid(SB) - -GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) - -TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setreuid(SB) - -GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) - -TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setresgid(SB) - -GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) - -TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setresuid(SB) - -GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) - -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - -TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrtable(SB) - -GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) - -TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setsid(SB) - -GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) - -TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_settimeofday(SB) - -GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 -DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) - -TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setuid(SB) - -GLOBL 
·libc_setuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) - -TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_stat(SB) - -GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) - -TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_statfs(SB) - -GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 -DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) - -TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_symlink(SB) - -GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 -DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) - -TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_symlinkat(SB) - -GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) - -TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sync(SB) - -GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) - -TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_truncate(SB) - -GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 -DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) - -TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_umask(SB) - -GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 -DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) - -TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unlink(SB) - -GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) - -TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unlinkat(SB) - -GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) - -TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unmount(SB) - -GLOBL 
·libc_unmount_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) - -TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_write(SB) - -GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 -DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) - -TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mmap(SB) - -GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) - -TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munmap(SB) - -GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 -DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) - -TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_utimensat(SB) - -GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 800aab6e..c96a5051 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -openbsd -libc -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go +// go run mksyscall.go -openbsd -tags openbsd,arm64 syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm64 @@ -16,7 +16,7 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -24,28 +24,20 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { return } -var libc_getgroups_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getgroups getgroups "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) + _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setgroups_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setgroups setgroups "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -53,14 +45,10 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err return } -var libc_wait4_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_wait4 wait4 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := 
syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -68,42 +56,30 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { return } -var libc_accept_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_accept accept "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_bind_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_bind bind "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen)) + _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_connect_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_connect connect "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -111,94 +87,66 @@ func socket(domain int, typ int, proto int) (fd int, err error) { return } -var libc_socket_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_socket socket "libc.so" - // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) + _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getsockopt_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) + _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setsockopt_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getpeername_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpeername getpeername "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + 
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getsockname_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsockname getsockname "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { - _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0) + _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_shutdown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_shutdown shutdown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) + _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_socketpair_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_socketpair socketpair "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { @@ -208,7 +156,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -216,10 +164,6 @@ func recvfrom(fd 
int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl return } -var libc_recvfrom_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { @@ -229,21 +173,17 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sendto_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sendto sendto "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -251,14 +191,10 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -var libc_recvmsg_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -266,14 +202,10 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { return } -var 
libc_sendmsg_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) + r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -281,10 +213,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne return } -var libc_kevent_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kevent kevent "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { @@ -293,35 +221,27 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_utimes_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_utimes utimes "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { - _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) + _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_futimes_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_futimes futimes "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) + r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -329,10 +249,6 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { return } -var libc_poll_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_poll poll "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Madvise(b []byte, behav int) (err error) { @@ -342,17 +258,13 @@ func Madvise(b []byte, behav int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav)) + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_madvise_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_madvise madvise "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { @@ -362,31 +274,23 @@ func Mlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mlock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mlock mlock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0) + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mlockall_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mlockall mlockall "libc.so" - // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { @@ -396,17 +300,13 @@ func Mprotect(b []byte, prot int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mprotect_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mprotect mprotect "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Msync(b []byte, flags int) (err error) { @@ -416,17 +316,13 @@ func Msync(b []byte, flags int) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_msync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_msync msync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { @@ -436,45 +332,33 @@ func Munlock(b []byte) (err error) { } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0) + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munlock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munlock munlock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { - _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0) + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munlockall_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munlockall munlockall "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_pipe2_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdents(fd int, buf []byte) (n int, err error) { @@ -484,7 +368,7 @@ func Getdents(fd int, buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -492,10 +376,6 @@ func Getdents(fd int, buf []byte) (n int, err error) { return } -var libc_getdents_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getdents getdents "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getcwd(buf []byte) (n int, err error) { @@ -505,7 +385,7 @@ func Getcwd(buf []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0) + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -513,24 +393,16 @@ func Getcwd(buf []byte) (n int, err error) { return } -var libc_getcwd_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getcwd getcwd "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { err = errnoErr(e1) } return } 
-var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -540,21 +412,17 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sysctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -562,10 +430,6 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, return } -var libc_ppoll_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ppoll ppoll "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { @@ -574,31 +438,23 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 
:= Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_access_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_access access "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { - _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) + _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_adjtime_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_adjtime adjtime "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { @@ -607,17 +463,13 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chdir chdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { @@ -626,17 +478,13 @@ func Chflags(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chflags_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chflags chflags "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { @@ -645,17 +493,13 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := 
syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chmod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chmod chmod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { @@ -664,17 +508,13 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chown chown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { @@ -683,35 +523,27 @@ func Chroot(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_chroot_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_chroot chroot "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_close_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_close close "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { - r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0) + r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 
0 { err = errnoErr(e1) @@ -719,49 +551,33 @@ func Dup(fd int) (nfd int, err error) { return } -var libc_dup_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup dup "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { - _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0) + _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_dup2_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup2 dup2 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup3(from int, to int, flags int) (err error) { - _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags)) + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_dup3_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_dup3 dup3 "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { - syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0) + Syscall(SYS_EXIT, uintptr(code), 0, 0) return } -var libc_exit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_exit exit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -770,59 +586,43 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_faccessat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_faccessat faccessat "libc.so" - // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchdir fchdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { - _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchflags_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchflags fchflags "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchmod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchmod fchmod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { @@ -831,31 +631,23 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchmodat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 
:= syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchown fchown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { @@ -864,35 +656,27 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fchownat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fchownat fchownat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0) + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_flock_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_flock flock "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { - r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0) + r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -900,24 +684,16 @@ func Fpathconf(fd int, name int) (val int, err error) { return } -var libc_fpathconf_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
func Fstat(fd int, stat *Stat_t) (err error) { - _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstat fstat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { @@ -926,99 +702,71 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstatat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { - _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fstatfs_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0) + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_fsync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_fsync fsync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { - _, 
_, e1 := syscall_syscall(libc_ftruncate_trampoline_addr, uintptr(fd), uintptr(length), 0) + _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_ftruncate_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { - r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } -var libc_getegid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getegid getegid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { - r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) uid = int(r0) return } -var libc_geteuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_geteuid geteuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { - r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) gid = int(r0) return } -var libc_getgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getgid getgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0) + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1026,50 +774,34 @@ func Getpgid(pid int) (pgid int, err error) { return } -var libc_getpgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpgid getpgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { - r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPGRP, 
0, 0, 0) pgrp = int(r0) return } -var libc_getpgrp_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { - r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) pid = int(r0) return } -var libc_getpid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpid getpid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { - r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } -var libc_getppid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getppid getppid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0) + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1077,28 +809,20 @@ func Getpriority(which int, who int) (prio int, err error) { return } -var libc_getpriority_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getpriority getpriority "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrtable() (rtable int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 
0, 0, 0) + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) rtable = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1106,28 +830,20 @@ func Getrtable() (rtable int, err error) { return } -var libc_getrtable_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrtable getrtable "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_getrusage_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getrusage getrusage "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0) + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1135,66 +851,46 @@ func Getsid(pid int) (sid int, err error) { return } -var libc_getsid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getsid getsid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Gettimeofday(tv *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0) + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_gettimeofday_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { - r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0) + r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) return } -var libc_getuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_getuid 
getuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { - r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0) + r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0) tainted = bool(r0 != 0) return } -var libc_issetugid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_issetugid issetugid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kill(pid int, signum syscall.Signal) (err error) { - _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0) + _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_kill_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kill kill "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { - r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0) + r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1202,10 +898,6 @@ func Kqueue() (fd int, err error) { return } -var libc_kqueue_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_kqueue kqueue "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { @@ -1214,17 +906,13 @@ func Lchown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_lchown_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lchown lchown "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { @@ -1238,17 +926,13 @@ func Link(path string, link string) (err error) { if err != nil { 
return } - _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_link_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_link link "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) { @@ -1262,31 +946,23 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er if err != nil { return } - _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_linkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_linkat linkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0) + _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_listen_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_listen listen "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { @@ -1295,17 +971,13 @@ func Lstat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var 
libc_lstat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lstat lstat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { @@ -1314,17 +986,13 @@ func Mkdir(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkdir mkdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdirat(dirfd int, path string, mode uint32) (err error) { @@ -1333,17 +1001,13 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkdirat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { @@ -1352,17 +1016,13 @@ func Mkfifo(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkfifo_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifoat(dirfd int, path string, mode uint32) (err error) { @@ -1371,17 +1031,13 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err 
error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mkfifoat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { @@ -1390,17 +1046,13 @@ func Mknod(path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mknod_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mknod mknod "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { @@ -1409,31 +1061,23 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_mknodat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mknodat mknodat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), 
uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_nanosleep_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { @@ -1442,7 +1086,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1450,10 +1094,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { return } -var libc_open_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_open open "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { @@ -1462,7 +1102,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1470,10 +1110,6 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { return } -var libc_openat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_openat openat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { @@ -1482,7 +1118,7 @@ func Pathconf(path string, name int) (val int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, 
uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) + r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1490,10 +1126,6 @@ func Pathconf(path string, name int) (val int, err error) { return } -var libc_pathconf_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pathconf pathconf "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pread(fd int, p []byte, offset int64) (n int, err error) { @@ -1503,7 +1135,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1511,10 +1143,6 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { return } -var libc_pread_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pread pread "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pwrite(fd int, p []byte, offset int64) (n int, err error) { @@ -1524,7 +1152,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) + r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1532,10 +1160,6 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { return } -var libc_pwrite_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_pwrite pwrite "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { @@ -1545,7 +1169,7 @@ func read(fd int, p []byte) (n int, err 
error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1553,10 +1177,6 @@ func read(fd int, p []byte) (n int, err error) { return } -var libc_read_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_read read "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { @@ -1571,7 +1191,7 @@ func Readlink(path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1579,10 +1199,6 @@ func Readlink(path string, buf []byte) (n int, err error) { return } -var libc_readlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readlink readlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { @@ -1597,7 +1213,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { } else { _p1 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1605,10 +1221,6 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { return } -var libc_readlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so" - // THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { @@ -1622,17 +1234,13 @@ func Rename(from string, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_rename_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_rename rename "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Renameat(fromfd int, from string, tofd int, to string) (err error) { @@ -1646,17 +1254,13 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) + _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_renameat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_renameat renameat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { @@ -1665,17 +1269,13 @@ func Revoke(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_revoke_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_revoke revoke "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { @@ -1684,21 +1284,17 @@ func Rmdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, 
uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_rmdir_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_rmdir rmdir "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { - r0, _, e1 := syscall_syscall(libc_lseek_trampoline_addr, uintptr(fd), uintptr(offset), uintptr(whence)) + r0, _, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(whence), 0, 0) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) @@ -1706,14 +1302,10 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { return } -var libc_lseek_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_lseek lseek "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1721,52 +1313,36 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err return } -var libc_select_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_select select "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setegid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setegid setegid 
"libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_seteuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_seteuid seteuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setgid setgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { @@ -1775,133 +1351,97 @@ func Setlogin(name string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setlogin_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setlogin setlogin "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0) + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setpgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setpgid setpgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio)) + _, _, e1 := Syscall(SYS_SETPRIORITY, 
uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setpriority_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setpriority setpriority "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0) + _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setregid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setregid setregid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0) + _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setreuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setreuid setreuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresgid(rgid int, egid int, sgid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid)) + _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setresgid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setresgid setresgid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setresuid(ruid int, euid int, suid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid)) + _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setresuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setresuid setresuid "libc.so" 
- // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) + _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setrlimit_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrtable(rtable int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0) + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setrtable_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setrtable setrtable "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0) + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -1909,38 +1449,26 @@ func Setsid() (pid int, err error) { return } -var libc_setsid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setsid setsid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { - _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0) + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_settimeofday_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0) + _, _, e1 := RawSyscall(SYS_SETUID, 
uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_setuid_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_setuid setuid "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { @@ -1949,17 +1477,13 @@ func Stat(path string, stat *Stat_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_stat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_stat stat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { @@ -1968,17 +1492,13 @@ func Statfs(path string, stat *Statfs_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) + _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_statfs_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_statfs statfs "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { @@ -1992,17 +1512,13 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_symlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_symlink symlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlinkat(oldpath string, 
newdirfd int, newpath string) (err error) { @@ -2016,31 +1532,23 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) if e1 != 0 { err = errnoErr(e1) } return } -var libc_symlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { - _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0) + _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_sync_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sync sync "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { @@ -2049,29 +1557,21 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) + _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_truncate_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_truncate truncate "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { - r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0) + r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) oldmask = int(r0) return } -var libc_umask_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_umask umask "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { @@ -2080,17 +1580,13 @@ func Unlink(path string) 
(err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unlink_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unlink unlink "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlinkat(dirfd int, path string, flags int) (err error) { @@ -2099,17 +1595,13 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unlinkat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { @@ -2118,17 +1610,13 @@ func Unmount(path string, flags int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_unmount_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_unmount unmount "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { @@ -2138,7 +1626,7 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p))) + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2146,14 +1634,10 @@ func write(fd 
int, p []byte) (n int, err error) { return } -var libc_write_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_write write "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), 0, 0) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) @@ -2161,28 +1645,20 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( return } -var libc_mmap_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_mmap mmap "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0) + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } -var libc_munmap_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_munmap munmap "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2193,7 +1669,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) + r0, _, e1 := 
Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) @@ -2209,13 +1685,9 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error if err != nil { return } - _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } - -var libc_utimensat_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" diff --git a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s deleted file mode 100644 index 4efeff9a..00000000 --- a/mantle/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ /dev/null @@ -1,796 +0,0 @@ -// go run mkasm.go openbsd arm64 -// Code generated by the command above; DO NOT EDIT. 
- -#include "textflag.h" - -TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getgroups(SB) - -GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) - -TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setgroups(SB) - -GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) - -TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_wait4(SB) - -GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 -DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) - -TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_accept(SB) - -GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 -DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) - -TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_bind(SB) - -GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 -DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) - -TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_connect(SB) - -GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 -DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) - -TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_socket(SB) - -GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 -DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) - -TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsockopt(SB) - -GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) - -TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setsockopt(SB) - -GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) - -TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpeername(SB) - -GLOBL ·libc_getpeername_trampoline_addr(SB), 
RODATA, $8 -DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) - -TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsockname(SB) - -GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) - -TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_shutdown(SB) - -GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) - -TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_socketpair(SB) - -GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 -DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) - -TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_recvfrom(SB) - -GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 -DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) - -TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sendto(SB) - -GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) - -TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_recvmsg(SB) - -GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 -DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) - -TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sendmsg(SB) - -GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) - -TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kevent(SB) - -GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 -DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) - -TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_utimes(SB) - -GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 -DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) - -TEXT 
libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_futimes(SB) - -GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 -DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) - -TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_poll(SB) - -GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 -DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) - -TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_madvise(SB) - -GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 -DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) - -TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mlock(SB) - -GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) - -TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mlockall(SB) - -GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) - -TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mprotect(SB) - -GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) - -TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_msync(SB) - -GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 -DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) - -TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munlock(SB) - -GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 -DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) - -TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munlockall(SB) - -GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 -DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) - -TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pipe2(SB) - -GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pipe2_trampoline_addr(SB)/8, 
$libc_pipe2_trampoline<>(SB) - -TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getdents(SB) - -GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB) - -TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getcwd(SB) - -GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) - -TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ioctl(SB) - -GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) - -TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ppoll(SB) - -GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 -DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB) - -TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_access(SB) - -GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 -DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) - -TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_adjtime(SB) - -GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 -DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) - -TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chdir(SB) - -GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) - -TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chflags(SB) - -GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) - -TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chmod(SB) - -GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chmod_trampoline_addr(SB)/8, 
$libc_chmod_trampoline<>(SB) - -TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chown(SB) - -GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) - -TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_chroot(SB) - -GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 -DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) - -TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_close(SB) - -GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 -DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) - -TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup(SB) - -GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 -DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) - -TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup2(SB) - -GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 -DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) - -TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_dup3(SB) - -GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8 -DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB) - -TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_exit(SB) - -GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) - -TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_faccessat(SB) - -GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) - -TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchdir(SB) - -GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) - -TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchflags(SB) - -GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) - -TEXT 
libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchmod(SB) - -GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) - -TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchmodat(SB) - -GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) - -TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchown(SB) - -GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) - -TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fchownat(SB) - -GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) - -TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_flock(SB) - -GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 -DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) - -TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fpathconf(SB) - -GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) - -TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstat(SB) - -GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) - -TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstatat(SB) - -GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) - -TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fstatfs(SB) - -GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) - -TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_fsync(SB) - -GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 -DATA ·libc_fsync_trampoline_addr(SB)/8, 
$libc_fsync_trampoline<>(SB) - -TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_ftruncate(SB) - -GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 -DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) - -TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getegid(SB) - -GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) - -TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_geteuid(SB) - -GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) - -TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getgid(SB) - -GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) - -TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpgid(SB) - -GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) - -TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpgrp(SB) - -GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) - -TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpid(SB) - -GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) - -TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getppid(SB) - -GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) - -TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getpriority(SB) - -GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) - -TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrlimit(SB) - -GLOBL ·libc_getrlimit_trampoline_addr(SB), 
RODATA, $8 -DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) - -TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrtable(SB) - -GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB) - -TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getrusage(SB) - -GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) - -TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getsid(SB) - -GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) - -TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_gettimeofday(SB) - -GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 -DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) - -TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_getuid(SB) - -GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) - -TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_issetugid(SB) - -GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) - -TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kill(SB) - -GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 -DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) - -TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_kqueue(SB) - -GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 -DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) - -TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lchown(SB) - -GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 -DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) - -TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 - JMP 
libc_link(SB) - -GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 -DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) - -TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_linkat(SB) - -GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) - -TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_listen(SB) - -GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 -DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) - -TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lstat(SB) - -GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) - -TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkdir(SB) - -GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) - -TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkdirat(SB) - -GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) - -TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkfifo(SB) - -GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) - -TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mkfifoat(SB) - -GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB) - -TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mknod(SB) - -GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) - -TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mknodat(SB) - -GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) - -TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_nanosleep(SB) 
- -GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 -DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB) - -TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_open(SB) - -GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 -DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) - -TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_openat(SB) - -GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) - -TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pathconf(SB) - -GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) - -TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pread(SB) - -GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) - -TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_pwrite(SB) - -GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 -DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) - -TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_read(SB) - -GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 -DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) - -TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readlink(SB) - -GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) - -TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_readlinkat(SB) - -GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) - -TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_rename(SB) - -GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 -DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) - -TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 - JMP 
libc_renameat(SB) - -GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) - -TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_revoke(SB) - -GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 -DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) - -TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_rmdir(SB) - -GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 -DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) - -TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_lseek(SB) - -GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 -DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) - -TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_select(SB) - -GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 -DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) - -TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setegid(SB) - -GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) - -TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_seteuid(SB) - -GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) - -TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setgid(SB) - -GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) - -TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setlogin(SB) - -GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) - -TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setpgid(SB) - -GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) - -TEXT 
libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setpriority(SB) - -GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) - -TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setregid(SB) - -GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) - -TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setreuid(SB) - -GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) - -TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setresgid(SB) - -GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB) - -TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setresuid(SB) - -GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB) - -TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrlimit(SB) - -GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB) - -TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setrtable(SB) - -GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB) - -TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setsid(SB) - -GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) - -TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_settimeofday(SB) - -GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 -DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) - -TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_setuid(SB) - -GLOBL 
·libc_setuid_trampoline_addr(SB), RODATA, $8 -DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) - -TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_stat(SB) - -GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) - -TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_statfs(SB) - -GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 -DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) - -TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_symlink(SB) - -GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 -DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) - -TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_symlinkat(SB) - -GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) - -TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sync(SB) - -GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) - -TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_truncate(SB) - -GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 -DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) - -TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_umask(SB) - -GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 -DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) - -TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unlink(SB) - -GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) - -TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unlinkat(SB) - -GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) - -TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_unmount(SB) - -GLOBL 
·libc_unmount_trampoline_addr(SB), RODATA, $8 -DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) - -TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_write(SB) - -GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 -DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) - -TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_mmap(SB) - -GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 -DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) - -TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_munmap(SB) - -GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 -DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) - -TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_utimensat(SB) - -GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 -DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c9c4ad03..62192e1d 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/386/include -m32 /tmp/386/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 12ff3417..490aab5d 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/amd64/include -m64 /tmp/amd64/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c3fb5e77..aca17b6f 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm/include /tmp/arm/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 358c847a..54b4dfa5 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/arm64/include -fsigned-char /tmp/arm64/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 81c4849b..44a764c9 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/loong64/include /tmp/loong64/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 202a57e9..65a99efc 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips/include /tmp/mips/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 1fbceb52..841c8a66 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64/include /tmp/mips64/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b4ffb7a2..e26a7c76 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mips64le/include /tmp/mips64le/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 867985f9..26447260 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/mipsle/include /tmp/mipsle/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index a8cce69e..26aefc18 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc/include /tmp/ppc/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index d44c5b39..8d4cd9d9 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64/include /tmp/ppc64/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4214dd9c..3b405d1f 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/ppc64le/include /tmp/ppc64le/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 3e594a8c..3a9c96b2 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/riscv64/include /tmp/riscv64/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build riscv64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 7ea46520..8ffa6646 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/s390x/include -fsigned-char /tmp/s390x/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 92f628ef..6a39640e 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -1,4 +1,4 @@ -// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/sparc64/include /tmp/sparc64/include/asm/unistd.h +// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 59773381..817edbf9 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -6,7 +6,6 @@ package unix -// Deprecated: Use libc wrappers instead of direct syscalls. 
const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af2918..ea453614 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -6,7 +6,6 @@ package unix -// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef591..32eec5ed 100644 --- a/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/mantle/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -6,7 +6,6 @@ package unix -// Deprecated: Use libc wrappers instead of direct syscalls. const ( SYS_EXIT = 1 // { void sys_exit(int rval); } SYS_FORK = 2 // { int sys_fork(void); } diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux.go index ff688116..86984798 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -945,9 +945,6 @@ type PerfEventAttr struct { Aux_watermark uint32 Sample_max_stack uint16 _ uint16 - Aux_sample_size uint32 - _ uint32 - Sig_data uint64 } type PerfEventMmapPage struct { @@ -1466,11 +1463,6 @@ const ( IFLA_ALT_IFNAME = 0x35 IFLA_PERM_ADDRESS = 0x36 IFLA_PROTO_DOWN_REASON = 0x37 - IFLA_PARENT_DEV_NAME = 0x38 - IFLA_PARENT_DEV_BUS_NAME = 0x39 - IFLA_GRO_MAX_SIZE = 0x3a - IFLA_TSO_MAX_SIZE = 0x3b - IFLA_TSO_MAX_SEGS = 0x3c IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 IFLA_PROTO_DOWN_REASON_MASK = 0x1 IFLA_PROTO_DOWN_REASON_VALUE = 0x2 diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_386.go 
b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 26360440..7551af48 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/386/cgo -- -Wall -Werror -static -I/tmp/386/include -m32 linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 8187489d..3e738ac0 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/amd64/cgo -- -Wall -Werror -static -I/tmp/amd64/include -m64 linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d1612335..6183eef4 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/arm/cgo -- -Wall -Werror -static -I/tmp/arm/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index c28e5556..968cecb1 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/arm64/cgo -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 187061f9..8fe4c522 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/loong64/cgo -- -Wall -Werror -static -I/tmp/loong64/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 36912991..11426a30 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/mips/cgo -- -Wall -Werror -static -I/tmp/mips/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 7473468d..ad1c3b3d 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/mips64/cgo -- -Wall -Werror -static -I/tmp/mips64/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index ed944852..15fd84e4 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/mips64le/cgo -- -Wall -Werror -static -I/tmp/mips64le/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 0892a73a..49c49825 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/mipsle/cgo -- -Wall -Werror -static -I/tmp/mipsle/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index e1dd4833..cd36d0da 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/ppc/cgo -- -Wall -Werror -static -I/tmp/ppc/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index d9f654c7..8c6fce03 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/ppc64/cgo -- -Wall -Werror -static -I/tmp/ppc64/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 74acda9f..20910f2a 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/ppc64le/cgo -- -Wall -Werror -static -I/tmp/ppc64le/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64le && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 50ebe69e..71b7b333 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/riscv64/cgo -- -Wall -Werror -static -I/tmp/riscv64/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 75b34c25..71184cc2 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/s390x/cgo -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux diff --git a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 429c3bf7..06156285 100644 --- a/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/mantle/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -objdir=/tmp/sparc64/cgo -- -Wall -Werror -static -I/tmp/sparc64/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build sparc64 && linux diff --git a/mantle/vendor/golang.org/x/sys/windows/syscall_windows.go b/mantle/vendor/golang.org/x/sys/windows/syscall_windows.go index e2791381..be3ec2bd 100644 --- a/mantle/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/mantle/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -417,7 +417,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation //sys GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW //sys GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW -//sys QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx // NT Native APIs //sys rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb @@ -972,32 +971,6 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { return unsafe.Pointer(&sa.raw), sl, nil } -type RawSockaddrBth struct { - AddressFamily [2]byte - BtAddr [8]byte - ServiceClassId [16]byte - Port [4]byte -} - -type SockaddrBth struct { - BtAddr uint64 - ServiceClassId GUID - Port uint32 - - raw RawSockaddrBth -} - -func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) { - family := AF_BTH - sa.raw = RawSockaddrBth{ - AddressFamily: *(*[2]byte)(unsafe.Pointer(&family)), - BtAddr: *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)), - Port: *(*[4]byte)(unsafe.Pointer(&sa.Port)), - ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)), - } - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { switch rsa.Addr.Family { case AF_UNIX: @@ -1734,71 +1707,3 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { h.Cap = int(size) return } - -// 
PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page. -type PSAPI_WORKING_SET_EX_BLOCK uint64 - -// Valid returns the validity of this page. -// If this bit is 1, the subsequent members are valid; otherwise they should be ignored. -func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool { - return (b & 1) == 1 -} - -// ShareCount is the number of processes that share this page. The maximum value of this member is 7. -func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 { - return b.intField(1, 3) -} - -// Win32Protection is the memory protection attributes of the page. For a list of values, see -// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants -func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 { - return b.intField(4, 11) -} - -// Shared returns the shared status of this page. -// If this bit is 1, the page can be shared. -func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool { - return (b & (1 << 15)) == 1 -} - -// Node is the NUMA node. The maximum value of this member is 63. -func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 { - return b.intField(16, 6) -} - -// Locked returns the locked status of this page. -// If this bit is 1, the virtual page is locked in physical memory. -func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool { - return (b & (1 << 22)) == 1 -} - -// LargePage returns the large page status of this page. -// If this bit is 1, the page is a large page. -func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool { - return (b & (1 << 23)) == 1 -} - -// Bad returns the bad status of this page. -// If this bit is 1, the page is has been reported as bad. -func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool { - return (b & (1 << 31)) == 1 -} - -// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union. 
-func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 { - var mask PSAPI_WORKING_SET_EX_BLOCK - for pos := start; pos < start+length; pos++ { - mask |= (1 << pos) - } - - masked := b & mask - return uint64(masked >> start) -} - -// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process. -type PSAPI_WORKING_SET_EX_INFORMATION struct { - // The virtual address. - VirtualAddress Pointer - // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. - VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK -} diff --git a/mantle/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/mantle/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 52d4742c..678262cd 100644 --- a/mantle/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/mantle/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -408,7 +408,6 @@ var ( procGetModuleBaseNameW = modpsapi.NewProc("GetModuleBaseNameW") procGetModuleFileNameExW = modpsapi.NewProc("GetModuleFileNameExW") procGetModuleInformation = modpsapi.NewProc("GetModuleInformation") - procQueryWorkingSetEx = modpsapi.NewProc("QueryWorkingSetEx") procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") @@ -3505,14 +3504,6 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb return } -func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) { - r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb)) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { ret = procSubscribeServiceChangeNotifications.Find() if ret 
!= nil { diff --git a/mantle/vendor/golang.org/x/term/README.md b/mantle/vendor/golang.org/x/term/README.md index d03d0aef..e0f390cb 100644 --- a/mantle/vendor/golang.org/x/term/README.md +++ b/mantle/vendor/golang.org/x/term/README.md @@ -1,7 +1,5 @@ # Go terminal/console support -[![Go Reference](https://pkg.go.dev/badge/golang.org/x/term.svg)](https://pkg.go.dev/golang.org/x/term) - This repository provides Go terminal and console support packages. ## Download/Install diff --git a/mantle/vendor/golang.org/x/term/codereview.cfg b/mantle/vendor/golang.org/x/term/codereview.cfg deleted file mode 100644 index 3f8b14b6..00000000 --- a/mantle/vendor/golang.org/x/term/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/mantle/vendor/golang.org/x/term/go.mod b/mantle/vendor/golang.org/x/term/go.mod deleted file mode 100644 index edf0e5b1..00000000 --- a/mantle/vendor/golang.org/x/term/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module golang.org/x/term - -go 1.17 - -require golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 diff --git a/mantle/vendor/golang.org/x/term/go.sum b/mantle/vendor/golang.org/x/term/go.sum deleted file mode 100644 index ff132135..00000000 --- a/mantle/vendor/golang.org/x/term/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/mantle/vendor/golang.org/x/term/term.go b/mantle/vendor/golang.org/x/term/term.go index d5927088..69931cc8 100644 --- a/mantle/vendor/golang.org/x/term/term.go +++ b/mantle/vendor/golang.org/x/term/term.go @@ -7,13 +7,11 @@ // // Putting a terminal into raw mode is the most common requirement: // -// oldState, err := term.MakeRaw(int(os.Stdin.Fd())) +// oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } -// defer term.Restore(int(os.Stdin.Fd()), oldState) -// -// Note that 
on non-Unix systems os.Stdin.Fd() may not be 0. +// defer terminal.Restore(0, oldState) package term // State contains the state of a terminal. diff --git a/mantle/vendor/golang.org/x/term/term_solaris.go b/mantle/vendor/golang.org/x/term/term_solaris.go new file mode 100644 index 00000000..b9da2974 --- /dev/null +++ b/mantle/vendor/golang.org/x/term/term_solaris.go @@ -0,0 +1,111 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package term + +import ( + "io" + "syscall" + + "golang.org/x/sys/unix" +) + +// State contains the state of a terminal. +type state struct { + termios unix.Termios +} + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} + +func readPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) 
+ if n < len(buf) { + break + } + } + + return ret, nil +} + +func makeRaw(fd int) (*State, error) { + // see http://cr.illumos.org/~webrev/andy_js/1060/ + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + oldState := State{state{termios: *termios}} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +func restore(fd int, oldState *State) error { + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) +} + +func getState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + return &State{state{termios: *termios}}, nil +} + +func getSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/mantle/vendor/golang.org/x/term/term_unix.go b/mantle/vendor/golang.org/x/term/term_unix.go index a4e31ab1..4c60e457 100644 --- a/mantle/vendor/golang.org/x/term/term_unix.go +++ b/mantle/vendor/golang.org/x/term/term_unix.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd zos package term diff --git a/mantle/vendor/golang.org/x/term/term_unix_other.go b/mantle/vendor/golang.org/x/term/term_unix_aix.go similarity index 63% rename from mantle/vendor/golang.org/x/term/term_unix_other.go rename to mantle/vendor/golang.org/x/term/term_unix_aix.go index 1e8955c9..2d5efd26 100644 --- a/mantle/vendor/golang.org/x/term/term_unix_other.go +++ b/mantle/vendor/golang.org/x/term/term_unix_aix.go @@ -1,10 +1,7 @@ -// Copyright 2021 The Go Authors. All rights reserved. +// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || linux || solaris || zos -// +build aix linux solaris zos - package term import "golang.org/x/sys/unix" diff --git a/mantle/vendor/golang.org/x/term/term_unix_bsd.go b/mantle/vendor/golang.org/x/term/term_unix_bsd.go index 853b3d69..3342be00 100644 --- a/mantle/vendor/golang.org/x/term/term_unix_bsd.go +++ b/mantle/vendor/golang.org/x/term/term_unix_bsd.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin || dragonfly || freebsd || netbsd || openbsd // +build darwin dragonfly freebsd netbsd openbsd package term diff --git a/mantle/vendor/golang.org/x/net/http2/not_go118.go b/mantle/vendor/golang.org/x/term/term_unix_linux.go similarity index 34% rename from mantle/vendor/golang.org/x/net/http2/not_go118.go rename to mantle/vendor/golang.org/x/term/term_unix_linux.go index eab532c9..2d5efd26 100644 --- a/mantle/vendor/golang.org/x/net/http2/not_go118.go +++ b/mantle/vendor/golang.org/x/term/term_unix_linux.go @@ -1,17 +1,10 @@ -// Copyright 2021 The Go Authors. All rights reserved. 
+// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !go1.18 -// +build !go1.18 +package term -package http2 +import "golang.org/x/sys/unix" -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/mantle/vendor/golang.org/x/net/http2/go118.go b/mantle/vendor/golang.org/x/term/term_unix_zos.go similarity index 33% rename from mantle/vendor/golang.org/x/net/http2/go118.go rename to mantle/vendor/golang.org/x/term/term_unix_zos.go index aca4b2b3..b85ab899 100644 --- a/mantle/vendor/golang.org/x/net/http2/go118.go +++ b/mantle/vendor/golang.org/x/term/term_unix_zos.go @@ -1,17 +1,10 @@ -// Copyright 2021 The Go Authors. All rights reserved. +// Copyright 2020 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 -// +build go1.18 +package term -package http2 +import "golang.org/x/sys/unix" -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/mantle/vendor/golang.org/x/term/term_unsupported.go b/mantle/vendor/golang.org/x/term/term_unsupported.go index f1df8506..8b5d1bad 100644 --- a/mantle/vendor/golang.org/x/term/term_unsupported.go +++ b/mantle/vendor/golang.org/x/term/term_unsupported.go @@ -2,7 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9 // +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 package term diff --git a/mantle/vendor/google.golang.org/appengine/go.mod b/mantle/vendor/google.golang.org/appengine/go.mod deleted file mode 100644 index 635c34f5..00000000 --- a/mantle/vendor/google.golang.org/appengine/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module google.golang.org/appengine - -go 1.11 - -require ( - github.com/golang/protobuf v1.3.1 - golang.org/x/net v0.0.0-20190603091049-60506f45cf65 - golang.org/x/text v0.3.2 -) diff --git a/mantle/vendor/google.golang.org/appengine/go.sum b/mantle/vendor/google.golang.org/appengine/go.sum deleted file mode 100644 index ce22f685..00000000 --- a/mantle/vendor/google.golang.org/appengine/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/mantle/vendor/google.golang.org/grpc/go.mod 
b/mantle/vendor/google.golang.org/grpc/go.mod deleted file mode 100644 index 31f2b01f..00000000 --- a/mantle/vendor/google.golang.org/grpc/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module google.golang.org/grpc - -go 1.11 - -require ( - github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f - github.com/envoyproxy/go-control-plane v0.9.4 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/protobuf v1.3.3 - github.com/google/go-cmp v0.4.0 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 -) diff --git a/mantle/vendor/google.golang.org/grpc/go.sum b/mantle/vendor/google.golang.org/grpc/go.sum deleted file mode 100644 index be8078ea..00000000 --- a/mantle/vendor/google.golang.org/grpc/go.sum +++ /dev/null @@ -1,68 +0,0 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= 
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/mantle/vendor/gopkg.in/yaml.v2/go.mod b/mantle/vendor/gopkg.in/yaml.v2/go.mod deleted file mode 100644 index 2cbb85ae..00000000 --- a/mantle/vendor/gopkg.in/yaml.v2/go.mod +++ /dev/null @@ -1,5 +0,0 @@ 
-module gopkg.in/yaml.v2 - -go 1.15 - -require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 diff --git a/mantle/vendor/gopkg.in/yaml.v3/decode.go b/mantle/vendor/gopkg.in/yaml.v3/decode.go index df36e3a3..0173b698 100644 --- a/mantle/vendor/gopkg.in/yaml.v3/decode.go +++ b/mantle/vendor/gopkg.in/yaml.v3/decode.go @@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { if p.event.typ != yaml_NO_EVENT { return p.event.typ } - if !yaml_parser_parse(&p.parser, &p.event) { + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). + if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { p.fail() } return p.event.typ @@ -320,6 +323,8 @@ type decoder struct { decodeCount int aliasCount int aliasDepth int + + mergedFields map[interface{}]bool } var ( @@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + mapIsNew := false if out.IsNil() { out.Set(reflect.MakeMap(outt)) @@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } for i := 0; i < l; i += 2 { if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } k := reflect.New(kt).Elem() if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if mergedFields[ki] { + continue + } + mergedFields[ki] = true + } kkind := k.Kind() if kkind == reflect.Interface { kkind = k.Elem().Kind() @@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { } } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + d.stringMapType = stringMapType d.generalMapType = generalMapType return true @@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { } l := 
len(n.Content) for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { return false } } @@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { var elemType reflect.Type if sinfo.InlineMap != -1 { inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) elemType = inlineMap.Type().Elem() } @@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.prepare(n, field) } + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node var doneFields []bool if d.uniqueKeys { doneFields = make([]bool, len(sinfo.FieldsList)) @@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { for i := 0; i < l; i += 2 { ni := n.Content[i] if isMerge(ni) { - d.merge(n.Content[i+1], out) + mergeNode = n.Content[i+1] continue } if !d.unmarshal(ni, name) { continue } - if info, ok := sinfo.FieldsMap[name.String()]; ok { + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { if d.uniqueKeys { if doneFields[info.Id] { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) @@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) } } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } return true } @@ -918,19 +956,29 @@ func failWantMap() { failf("map merge requires map or sequence of maps as the value") } -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + 
mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[interface{}]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.mergedFields[k.Interface()] = true + } + } + } + + switch merge.Kind { case MappingNode: - d.unmarshal(n, out) + d.unmarshal(merge, out) case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { + if merge.Alias != nil && merge.Alias.Kind != MappingNode { failWantMap() } - d.unmarshal(n, out) + d.unmarshal(merge, out) case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] if ni.Kind == AliasNode { if ni.Alias != nil && ni.Alias.Kind != MappingNode { failWantMap() @@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { default: failWantMap() } + + d.mergedFields = mergedFields } func isMerge(n *Node) bool { diff --git a/mantle/vendor/gopkg.in/yaml.v3/go.mod b/mantle/vendor/gopkg.in/yaml.v3/go.mod deleted file mode 100644 index f407ea32..00000000 --- a/mantle/vendor/gopkg.in/yaml.v3/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v3" - -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/mantle/vendor/gopkg.in/yaml.v3/parserc.go b/mantle/vendor/gopkg.in/yaml.v3/parserc.go index ac66fccc..268558a0 100644 --- a/mantle/vendor/gopkg.in/yaml.v3/parserc.go +++ b/mantle/vendor/gopkg.in/yaml.v3/parserc.go @@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser 
*yaml_parser_t, stem_len int) { } token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { return } @@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } @@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { if first { token := peek_token(parser) + if token == nil { + return false + } parser.marks = append(parser.marks, token.start_mark) skip_token(parser) } diff --git a/mantle/vendor/modules.txt b/mantle/vendor/modules.txt index ea13c098..140eaf4d 100644 --- a/mantle/vendor/modules.txt +++ b/mantle/vendor/modules.txt @@ -1,15 +1,15 @@ # cloud.google.com/go v0.65.0 +## explicit; go 1.11 cloud.google.com/go/compute/metadata # github.com/Azure/azure-sdk-for-go v8.1.0-beta+incompatible +## explicit github.com/Azure/azure-sdk-for-go/arm/compute github.com/Azure/azure-sdk-for-go/arm/network github.com/Azure/azure-sdk-for-go/arm/resources/resources github.com/Azure/azure-sdk-for-go/arm/storage -github.com/Azure/azure-sdk-for-go/management -github.com/Azure/azure-sdk-for-go/management/location -github.com/Azure/azure-sdk-for-go/management/storageservice github.com/Azure/azure-sdk-for-go/storage # github.com/Azure/go-autorest v9.1.0+incompatible +## explicit github.com/Azure/go-autorest/autorest github.com/Azure/go-autorest/autorest/adal github.com/Azure/go-autorest/autorest/azure @@ -18,6 +18,7 @@ github.com/Azure/go-autorest/autorest/date 
github.com/Azure/go-autorest/autorest/to github.com/Azure/go-autorest/autorest/validation # github.com/IBM-Cloud/bluemix-go v0.0.0-20210419045805-b50610722085 +## explicit; go 1.13 github.com/IBM-Cloud/bluemix-go github.com/IBM-Cloud/bluemix-go/api/iampap/iampapv2 github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog @@ -37,6 +38,7 @@ github.com/IBM-Cloud/bluemix-go/session github.com/IBM-Cloud/bluemix-go/trace github.com/IBM-Cloud/bluemix-go/utils # github.com/IBM/ibm-cos-sdk-go v1.6.1 +## explicit; go 1.13 github.com/IBM/ibm-cos-sdk-go/aws github.com/IBM/ibm-cos-sdk-go/aws/arn github.com/IBM/ibm-cos-sdk-go/aws/awserr @@ -77,6 +79,7 @@ github.com/IBM/ibm-cos-sdk-go/service/s3/internal/arn github.com/IBM/ibm-cos-sdk-go/service/s3/s3iface github.com/IBM/ibm-cos-sdk-go/service/s3/s3manager # github.com/Microsoft/azure-vhd-utils v0.0.0-20161127050200-43293b8d7646 +## explicit github.com/Microsoft/azure-vhd-utils/upload github.com/Microsoft/azure-vhd-utils/upload/concurrent github.com/Microsoft/azure-vhd-utils/upload/metadata @@ -95,6 +98,7 @@ github.com/Microsoft/azure-vhd-utils/vhdcore/validator github.com/Microsoft/azure-vhd-utils/vhdcore/vhdfile github.com/Microsoft/azure-vhd-utils/vhdcore/writer # github.com/aliyun/alibaba-cloud-sdk-go v1.61.1442 +## explicit; go 1.13 github.com/aliyun/alibaba-cloud-sdk-go/sdk github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials @@ -107,8 +111,10 @@ github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils github.com/aliyun/alibaba-cloud-sdk-go/services/ecs # github.com/aliyun/aliyun-oss-go-sdk v2.0.3+incompatible +## explicit github.com/aliyun/aliyun-oss-go-sdk/oss # github.com/aws/aws-sdk-go v1.34.28 +## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -157,9 +163,13 @@ github.com/aws/aws-sdk-go/service/s3/s3iface 
github.com/aws/aws-sdk-go/service/s3/s3manager github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/clarketm/json v1.14.1 +# github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f +## explicit +# github.com/clarketm/json v1.17.1 +## explicit github.com/clarketm/json -# github.com/coreos/butane v0.14.0 +# github.com/coreos/butane v0.16.0 +## explicit; go 1.17 github.com/coreos/butane/base/util github.com/coreos/butane/base/v0_1 github.com/coreos/butane/base/v0_2 @@ -174,10 +184,16 @@ github.com/coreos/butane/config/fcos/v1_2 github.com/coreos/butane/config/fcos/v1_3 github.com/coreos/butane/config/fcos/v1_4 github.com/coreos/butane/config/fcos/v1_5_exp +github.com/coreos/butane/config/flatcar/v1_0 +github.com/coreos/butane/config/flatcar/v1_1_exp github.com/coreos/butane/config/openshift/v4_10 github.com/coreos/butane/config/openshift/v4_10/result -github.com/coreos/butane/config/openshift/v4_11_exp -github.com/coreos/butane/config/openshift/v4_11_exp/result +github.com/coreos/butane/config/openshift/v4_11 +github.com/coreos/butane/config/openshift/v4_11/result +github.com/coreos/butane/config/openshift/v4_12 +github.com/coreos/butane/config/openshift/v4_12/result +github.com/coreos/butane/config/openshift/v4_13_exp +github.com/coreos/butane/config/openshift/v4_13_exp/result github.com/coreos/butane/config/openshift/v4_8 github.com/coreos/butane/config/openshift/v4_8/result github.com/coreos/butane/config/openshift/v4_9 @@ -185,20 +201,26 @@ github.com/coreos/butane/config/openshift/v4_9/result github.com/coreos/butane/config/rhcos/v0_1 github.com/coreos/butane/config/util github.com/coreos/butane/translate -# github.com/coreos/coreos-assembler-schema v0.0.0-00010101000000-000000000000 => ../schema -github.com/coreos/coreos-assembler-schema/cosa -# github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de +# github.com/coreos/coreos-assembler v0.14.0 => ../ +## explicit; go 1.17 
+github.com/coreos/coreos-assembler/pkg/builds +# github.com/coreos/go-json v0.0.0-20220810161552-7cce03887f34 +## explicit; go 1.17 github.com/coreos/go-json # github.com/coreos/go-semver v0.3.0 +## explicit github.com/coreos/go-semver/semver # github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e +## explicit github.com/coreos/go-systemd/journal github.com/coreos/go-systemd/unit -# github.com/coreos/go-systemd/v22 v22.0.0 +# github.com/coreos/go-systemd/v22 v22.4.0 +## explicit; go 1.12 github.com/coreos/go-systemd/v22/dbus github.com/coreos/go-systemd/v22/journal github.com/coreos/go-systemd/v22/unit -# github.com/coreos/ignition/v2 v2.13.0 +# github.com/coreos/ignition/v2 v2.14.0 +## explicit; go 1.15 github.com/coreos/ignition/v2/config/merge github.com/coreos/ignition/v2/config/shared/errors github.com/coreos/ignition/v2/config/shared/validations @@ -219,12 +241,12 @@ github.com/coreos/ignition/v2/config/v3_4_experimental github.com/coreos/ignition/v2/config/v3_4_experimental/translate github.com/coreos/ignition/v2/config/v3_4_experimental/types github.com/coreos/ignition/v2/config/validate -# github.com/coreos/ioprogress v0.0.0-20151023204047-4637e494fd9b -github.com/coreos/ioprogress # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f +## explicit github.com/coreos/pkg/capnslog github.com/coreos/pkg/multierror -# github.com/coreos/stream-metadata-go v0.1.7 +# github.com/coreos/stream-metadata-go v0.4.0 +## explicit; go 1.17 github.com/coreos/stream-metadata-go/arch github.com/coreos/stream-metadata-go/fedoracoreos github.com/coreos/stream-metadata-go/fedoracoreos/internals @@ -232,7 +254,8 @@ github.com/coreos/stream-metadata-go/release github.com/coreos/stream-metadata-go/release/rhcos github.com/coreos/stream-metadata-go/stream github.com/coreos/stream-metadata-go/stream/rhcos -# github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4 +# github.com/coreos/vcontext v0.0.0-20220810162454-88bd546c634c +## explicit; go 1.17 
github.com/coreos/vcontext/json github.com/coreos/vcontext/path github.com/coreos/vcontext/report @@ -240,38 +263,49 @@ github.com/coreos/vcontext/tree github.com/coreos/vcontext/validate github.com/coreos/vcontext/yaml # github.com/davecgh/go-spew v1.1.1 +## explicit github.com/davecgh/go-spew/spew # github.com/dgrijalva/jwt-go v3.2.0+incompatible +## explicit github.com/dgrijalva/jwt-go # github.com/digitalocean/go-libvirt v0.0.0-20200810224808-b9c702499bf7 +## explicit; go 1.14 github.com/digitalocean/go-libvirt github.com/digitalocean/go-libvirt/internal/constants github.com/digitalocean/go-libvirt/internal/go-xdr/xdr2 # github.com/digitalocean/go-qemu v0.0.0-20200529005954-1b453d036a9c +## explicit github.com/digitalocean/go-qemu/qmp # github.com/digitalocean/godo v1.33.0 +## explicit; go 1.13 github.com/digitalocean/godo # github.com/dimchansky/utfbom v1.1.1 +## explicit github.com/dimchansky/utfbom -# github.com/dustin/go-humanize v1.0.0 -github.com/dustin/go-humanize -# github.com/godbus/dbus/v5 v5.0.3 +# github.com/godbus/dbus/v5 v5.0.4 +## explicit; go 1.12 github.com/godbus/dbus/v5 # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e +## explicit github.com/golang/groupcache/lru # github.com/golang/protobuf v1.4.2 +## explicit; go 1.9 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp # github.com/google/go-querystring v1.0.0 +## explicit github.com/google/go-querystring/query # github.com/google/uuid v1.1.1 +## explicit github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 +## explicit github.com/googleapis/gax-go/v2 # github.com/gophercloud/gophercloud v0.22.0 +## explicit; go 1.13 github.com/gophercloud/gophercloud github.com/gophercloud/gophercloud/openstack github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume @@ -294,80 +328,79 @@ 
github.com/gophercloud/gophercloud/openstack/networking/v2/networks github.com/gophercloud/gophercloud/openstack/utils github.com/gophercloud/gophercloud/pagination # github.com/gophercloud/utils v0.0.0-20210323225332-7b186010c04f +## explicit; go 1.15 github.com/gophercloud/utils/env github.com/gophercloud/utils/gnocchi github.com/gophercloud/utils/internal github.com/gophercloud/utils/openstack/clientconfig github.com/gophercloud/utils/openstack/networking/v2/extensions/security/groups # github.com/inconshreveable/mousetrap v1.0.0 +## explicit github.com/inconshreveable/mousetrap # github.com/jmespath/go-jmespath v0.4.0 +## explicit; go 1.14 github.com/jmespath/go-jmespath # github.com/json-iterator/go v1.1.10 +## explicit; go 1.12 github.com/json-iterator/go # github.com/kballard/go-shellquote v0.0.0-20150810074751-d8ec1a69a250 +## explicit github.com/kballard/go-shellquote -# github.com/klauspost/cpuid v1.3.1 -github.com/klauspost/cpuid # github.com/kylelemons/godebug v0.0.0-20150519154555-21cb3784d9bd +## explicit github.com/kylelemons/godebug/diff github.com/kylelemons/godebug/pretty -# github.com/minio/md5-simd v1.1.0 -github.com/minio/md5-simd -# github.com/minio/minio-go/v7 v7.0.12 -github.com/minio/minio-go/v7 -github.com/minio/minio-go/v7/pkg/credentials -github.com/minio/minio-go/v7/pkg/encrypt -github.com/minio/minio-go/v7/pkg/lifecycle -github.com/minio/minio-go/v7/pkg/notification -github.com/minio/minio-go/v7/pkg/replication -github.com/minio/minio-go/v7/pkg/s3utils -github.com/minio/minio-go/v7/pkg/set -github.com/minio/minio-go/v7/pkg/signer -github.com/minio/minio-go/v7/pkg/sse -github.com/minio/minio-go/v7/pkg/tags -# github.com/minio/sha256-simd v0.1.1 -github.com/minio/sha256-simd # github.com/mitchellh/go-homedir v1.1.0 +## explicit github.com/mitchellh/go-homedir # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd +## explicit github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 +## explicit 
github.com/modern-go/reflect2 # github.com/packethost/packngo v0.0.0-20180426081943-80f62d78849d +## explicit github.com/packethost/packngo # github.com/pborman/uuid v1.2.0 +## explicit github.com/pborman/uuid # github.com/pin/tftp v2.1.0+incompatible +## explicit github.com/pin/tftp github.com/pin/tftp/netascii # github.com/pkg/errors v0.9.1 +## explicit github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 +## explicit github.com/pmezard/go-difflib/difflib -# github.com/rs/xid v1.2.1 -github.com/rs/xid -# github.com/sirupsen/logrus v1.8.1 +# github.com/satori/go.uuid v1.2.0 +## explicit +# github.com/sirupsen/logrus v1.9.0 +## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/spf13/cobra v0.0.6 +# github.com/spf13/cobra v1.5.0 +## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace +## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/testify v1.7.0 +# github.com/stretchr/testify v1.8.0 +## explicit; go 1.13 github.com/stretchr/testify/assert -# github.com/ulikunitz/xz v0.5.10 -github.com/ulikunitz/xz -github.com/ulikunitz/xz/internal/hash -github.com/ulikunitz/xz/internal/xlog -github.com/ulikunitz/xz/lzma # github.com/vincent-petithory/dataurl v1.0.0 +## explicit github.com/vincent-petithory/dataurl # github.com/vishvananda/netlink v0.0.0-20150710184826-9cff81214893 +## explicit github.com/vishvananda/netlink github.com/vishvananda/netlink/nl # github.com/vishvananda/netns v0.0.0-20150710222425-604eaf189ee8 +## explicit github.com/vishvananda/netns # github.com/vmware/govmomi v0.15.0 +## explicit github.com/vmware/govmomi github.com/vmware/govmomi/find github.com/vmware/govmomi/list @@ -385,12 +418,16 @@ github.com/vmware/govmomi/vim25/soap github.com/vmware/govmomi/vim25/types github.com/vmware/govmomi/vim25/xml # github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f +## explicit github.com/xeipuuv/gojsonpointer # github.com/xeipuuv/gojsonreference 
v0.0.0-20180127040603-bd5ef7bd5415 +## explicit github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v1.2.0 +## explicit github.com/xeipuuv/gojsonschema # go.opencensus.io v0.22.5 +## explicit; go 1.13 go.opencensus.io go.opencensus.io/internal go.opencensus.io/internal/tagencoding @@ -407,31 +444,20 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 -golang.org/x/crypto/argon2 -golang.org/x/crypto/blake2b +# golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd +## explicit; go 1.17 golang.org/x/crypto/blowfish -golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/ed25519 -golang.org/x/crypto/ed25519/internal/edwards25519 golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/internal/subtle -golang.org/x/crypto/openpgp -golang.org/x/crypto/openpgp/armor -golang.org/x/crypto/openpgp/elgamal -golang.org/x/crypto/openpgp/errors -golang.org/x/crypto/openpgp/packet -golang.org/x/crypto/openpgp/s2k -golang.org/x/crypto/pkcs12 -golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20220722155237-a158d28d115b +# golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 +## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts @@ -439,24 +465,27 @@ golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries -golang.org/x/net/publicsuffix golang.org/x/net/trace golang.org/x/net/websocket # golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 +## explicit; go 1.11 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws 
golang.org/x/oauth2/jwt -# golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab +# golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f +## explicit; go 1.17 golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 +# golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 +## explicit; go 1.11 golang.org/x/term -# golang.org/x/text v0.3.7 +# golang.org/x/text v0.3.6 +## explicit; go 1.11 golang.org/x/text/encoding golang.org/x/text/encoding/charmap golang.org/x/text/encoding/internal @@ -469,8 +498,10 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20191024005414-555d28b269f0 +## explicit golang.org/x/time/rate # google.golang.org/api v0.34.0 +## explicit; go 1.11 google.golang.org/api/compute/v1 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -486,6 +517,7 @@ google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.6 +## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal google.golang.org/appengine/internal/app_identity @@ -497,8 +529,10 @@ google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch # google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d +## explicit; go 1.11 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.31.1 +## explicit; go 1.11 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -541,6 +575,7 @@ google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap # google.golang.org/protobuf v1.25.0 +## explicit; go 1.9 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire 
google.golang.org/protobuf/internal/descfmt @@ -571,8 +606,13 @@ google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb # gopkg.in/ini.v1 v1.66.2 +## explicit gopkg.in/ini.v1 # gopkg.in/yaml.v2 v2.4.0 +## explicit; go 1.15 gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +# gopkg.in/yaml.v3 v3.0.1 +## explicit gopkg.in/yaml.v3 +# github.com/coreos/coreos-assembler => ../ +# google.golang.org/cloud => cloud.google.com/go v0.0.0-20190220171618-cbb15e60dc6d diff --git a/rpm-package/rpm-ostree-2022.16-3.oe2203sp2.aarch64.rpm b/rpm-package/rpm-ostree-2022.16-3.oe2203sp2.aarch64.rpm new file mode 100644 index 0000000000000000000000000000000000000000..eb03488758c1ea32ad231a69bd73741e44f35d9c GIT binary patch literal 3289873 zcmeFYc|6tI_c(lz5Sh!&A!X*w1DP_dG^7*pSquW@ArOx&+Ga9^W1l>v)*g3wbx#I?eV?WHdQ#?!vp~jUx2@ll3yS# zfI?A1zz_&!I7SJn>_A0if0Rio9%I-@V}U@p%fY(_0Q!3#z}yfB z!&ZPd1E4=ZmVpOgIxV+OGmQbf34r$oJqa+#5Ag>8@-xi@AIz*{`J7W9{AS-|9aqG5B%$ae?9Q82mbZIzaIG41OIy9Ul08M z-UDlABCD&bDZmrpOalV>>D)vM0+ICrNtyt_xrVGWz;qch09F9>hkousKPRD|n=o$B zA2(?F`3jvszCmx>py}r*Yyc!T=&21_eS@9`7@VsxT-u=N=PW=!L*54c31B+EdV~JF z!LQq(ziiNr8}$4J{cM9?Sf?53XEk*D76AtKFmi0rD;s>l4f^{A{hNJ1HfWu7{w81x z-5y3$fJMP8-520!i;-?0ou=27PJi8?w*U;}nG`o@c7TCDnX)!$x*dRKrQ1c%Pxm8` zXLH)1xz_n?&o^j#n*jMO_8T-mz`%wrLjZ&NZKC@f$E7nUUb?BV4zPldV{9d6Ud8Z0Zg|a*u2K4mrti1 z*ZE>VW=;S8b-ozTu|_*>@PY0%{q%kfcw(nE=mYDt#2bKt?=}$u2KANvt-j=STIvkI zbo(hA^!W{%x{JJiN)!KuSO`B>?K- zOY?&ge4!TBHsJgi8bG1vq4@=bDQ*dI_aM1LNq|Xnr}#pNYg}&+5`jh`Ly2KflD7xN z7nHbWX&^MjgXRt;(ENNnNYEgEw*Uf}5~#dIn@skgdHDGfyuHH|p+Uah9$plv55YGK zD&-#(K#^Ln_HRlQ*R}k|VOuB!x(z>rKf3Kw?)IY#4*n)N1AYr4Dud=yA^8PR`~p?}Q+#zr5j0uTn4Yhd17tI8TpPlG_vV6LL~QTlUy zLnnA;2YTp_ei0Z3G7yN+!y|cX|E4zZI+d5ZdIY3sPvx%u_D1JxucfU^uIu^V|B^!L zBq>{Mw`^cRI{q^NZ59RO*nX0wzi)s|$VPrZ|Nbttk+z0^*9k*o2ox%ngu}wHcp@5x zMw2KgECov@p$KFWoC=3gkW>N*1Rn4h3=v1dB4A_?M<5a57!mY#g@nbU 
zU<4!ui^0G#NH_|ELZT@+3WA8BQmHr;0*@hLP;eL-PDSGoXe0$ez>x4{EESHSV&QNk z1VhGaa1G@fk&c|C<+WkhLZ_sBpHq)0K;G?9F88hL8H+)0to1Ue=rQF z6a*;2piopi4n@IZ3BYhX5`jm663`%!gTSG%WCZXVnhH!IkWp|75l2Sh&^R1{NFozS zR3Zd~d|;q@a5M^sB_d&X6d6fIz^O1ChDt$^NoXVvNhG6?WHbg;4zvmyO-6%Y5rGH< zQ3NC&f`+5<2r8O@z`$T6JdzAr3$z*mG&Bsvh_F}$3Wq0xkP;P1MpA))PJ8I2?^5 zQb2b?f&oJzKrl!$71R?Cj0R1JCBZ-u2s8ylgu@X86dZI!Gzmq<~DLWF@%f=3`Iz?U#I z83%d-hDbsZK@tf=pl@M+f4n`2U`^WyQNxu3{gmO#Fl88>udJ-H{-5aK`}4y0SLwg& z{P~3*cmDt4p?{CjpXd#M1o-*U{{8tAAN=hN1m(a0k}~K35D55A{=fQ={Cue%ZnFQ` zP|1NE)qnLxSvgq@is(U;`)RxV)2r;-_Dcza`kAi;^aO%#p#@;=qn^Is0*}_e@PF)s zwU4VU-~;P-p!-vWJ~ITmLx@2{qBkWZfZz}LlV+s`1yUg1WY8>N!>qKiVP2bQ{?odD z&NeVL+N0wFZp~nS?*^c4={s(ZV9J^Xr={ z7X$y8^KaGS|3)nm@pnw*-!W0YFbV#2&meyFOhA}F&2P<%%J9G8SXkDcL|r#|5eUSDDtiztqCb!;k*KMpp{^RVirqzu@>0Jt-vGUoidM zeAYY4uQl`x^z&6(YbNwB#r&LZY;9q#;{sEL|KiFu2{*bm^mX#T?EDipkp1t}pZ)0L zM-K9)_>k$^er=*(mgQdz{gnfT2{3U%daflD~N2ZzX>jD1R&Y z%c%KV$zO)h-$-JW;lFgZzZdxBPPe*&HV(4Ro^mv;B}^Y-$fDY6t= zUnewwKY!YPPapI%zCTIz_n{}^f0+Tm#>rz%_SfV7I8{#5TlZx0#;`QH@^4D?>J@sC#iC;2ZIoj@9yq@Y00{p&ja zbGnYbz4jg>P|IIh@6RQae_4NjmRZ{s*xQ2z?3bqf3lU}bFJ1aCrT&?&U#7di(uMf7 z5r1;bPo^s){*hgXUk3SKmH4Z#)()4~R;^#V_WG&w1_LZj)(+M-dS<#{ZTcmD_4m2bLQ!E20RFBZ#vuv=90`0>=8qJ$5YhZ5cVyJCxC=9JENqN3=+~Y+A%|k`^&X zy^a`>uSK-ys0$(Zx`hQ%ygdMh?*5t5@(Kf6aKAuhlCK6-3Hp1jAU_NJqbvvrIVj8{ zgyI3+z47uFy5SITD(~w<@FV-tt@(-23I(49AoxH(T>sq~oDvEGlHCcu2SJ1Yy8A!x zf2JR&gan_AL0&=0un~>3-$1Y?q0`8n3lVeKOunTO9=oj z{;5`2(1J=((A}UQ6~u5TP)a}mh*D4i1cHVFUq#A5030(Zfh47kFp-iu=)e}%N?w!z zUy3(fz}BA(f*8>Md@(~od}yN;>3n@qf;%D5Wqm&x=tA)&fUh!HIVf;D$!om?x*19v zW+(wOl>G@bk~`p;dXOl-z%)9K-gij!kP?Xo;vpcY1ls9u%f|t3(7JWDR;Ex{5Wu1D z$=4%JN*e326S==tf&J;L{5tPXU0K^T~{`LaI7uWvwfqss_27n&p zrpL880MO&toB-%?Yi{!%2=!;;>#Q~3|LhOCPyPtZAnnh5^iTb?<}1GdH|1aY z4gFYZebR@6;p9vGxr6+6i13GW@*f3%&O-zTgn^(>27zx@5MTxU{bw%8FUXfh|5p5K z7R|$l67W+VvJxQ(#8koX0AG^-mF;Klf93pL-pb#{V*Ss9=%16cFn=%~5dZ8T?9b9b z4%kojD8ZCK$e04cyYvCO?h3F!gKTWsfv;#FbPB4lkpQCFN`W43zCWoT|MU)7U&oa; 
zsul?O+ZOuG0e>s>hvtG<>1pel=_&jCQml@JrGt^VfieiwLVkOv$87(;e6Zv8Aca9x zC^V7^nHUtP1Xo$NUzx13p6b1}BcYq{dlEz?C@^5{5NrJn_{yIbP>S7pg=hI zKQVav=l?Ivp9Oz@|Ic*&A8|@3g+$l6-VlG84jS(dymiH3fefIi5D9_qYg4&5h2GvE zLJel_wb#InTf&VPmPd=pkVMC)I=~r1m|5r8FMM0y`mz7^F`aSV4 zGHrZ4!Bq%g0rK+&nUyRMP$f60lD~&P1*&8RZY+S`wN+FdESX>kf=ZBr0z7D8Al+Y4 zN!LZ+!pg=--@(P&!q!ShPaR}mPycOetZi+r2P7=at)WU(FfEY0l)&sm1FIH&38n?m z_bW=-}(i;<|=L!Fs?jEN}yrL;FYXIBEf-P zLSUFL$sOqO106ERH^jr2tVE+P%0SCTp(Jm5Gj0@3UuQ}F9)6I}a0vM0x%I0BN`C$n zUp;U_1THbGouIn;(l6Eg;i1pb|I_nNWd0`|zpck~UBA8m5!2Vs-_}n$kG?+qmPTJE zeoLcc`a|dcoVJ#3Z)OSwV+>06BL(?@%PfjeZEGDPBPfkNrTz4(XQ~gypp-z20BUNz z2kP$uwpUZcUDJHa8wiujlfet2pfaNkw91$ zM45>o0*?g2X95z0<%w7dRdH?e_qz>Hq%zpvD?&lP^YHfv7f>L7sMXqFB16}Y6aKnV z;7e=YU)1MUJK-=a3XcI-B0xNniYMW5NDy2EVOtOghJ#o*4vr+_z?}m0PbQJU-apMgYtJ7y>W{U;@AtfIa{{00sc00iXe304M|4 z2|x~j5&(GsG5}Nn$O1qB*a1KifC2y{05|{?07U>&0AK)i0nh@l1;ADS>;O0bumRu( zzzKj004o4K02%<)0q_9O1t11sGXN|A0RTb(1Occ5Py=8AKm-6DfF^+503-nH0l*7D z8-Or?y#Sy9_yMp0*aX1N5{!9(>HeeFg>Ez5KDs@08|gOD%ctl2tvnof|Jjb;b%AvH z!x}`RD&ZhV5&{n5?qm$;)np@4H0e@~xg=0aKoj|{`K_TJ6Z5Rw5 z-2I@?L-BADl0N$Ax=`Te1(k|HkkEjMpx<#ofa?@^Gz@_!fsj8AO-5nScq)=Y1eb*n za5NH50^xQP8QfI>v3fXxu4L_U@#^Xd{hj+eHryWE-rVwGnIo4GBFVrD5_eR_DURmK z%=~bM2yX4$L_{nFu}5F7;o7@7qQTU5pRwf9uz$pjLgZ1`ug%7Or#>V@g07Sn)Mk|K znshaSLgpGj)+YoLdrsRCuW8x@sGYEz=DcjU{n>+r_e`rixqElYb)>dRG&PRE`)h34|%%8JL}PGF;A5f zslh{-e4ACN6mm?LbgF+Zl8OC}2>H4f2iz`buIX0%9M?hNqcsN~TpndRe)qd0-aq1K z+Fnbt%eLn{_BSenZAecN1LKQkFC9FvaK0737GG*@`T+t*(P3EPPg@zRUeN3<^%{@8-);`y7uL^J}%zUeplR8#;S_G05$tN}Q z!8KmWDft?YW7^qD;m6ji&;52>Ej;JlXK>QLp@NgN(w$bTxhqvHW#`bQLqWX$;u4{n z|_!yn{&((w=5)8!gYT&qt%)CN6+m?)3+ zs&Nrxj($+r^IBD~lVzN&$GH6I^U=!u?=fr-GxCu6qjwozmfx5x=Z&!JH0)4nI+3OR zJvXsrU{xWWVDzjVdTrCaxwCIx4HASWpXQypbO;vwrtrI*^xNcC+SZT5j?5ZmoIcdB zgF6qIcJ~^cV(s~&m*`u(r!HYU`?4d%Rn5}v+*5YcPPeCrUx>^Vt_7C~ZI>6f)9<^9tI%AO+$3kgHx(`w@lC%u z@IrOcxm#NO^UM6~PLEtT^zV_J^r6?!jPyxuKHu0Tb5~A9K{qqy1Hb2s3x}sFICj0S z>}-U2G2FEy`XLomP!#rf5F~pAJ^Tdw9C8X^j;?hY^~Zf`NH87523xIPZJMkiHcnn 
z^OdlS_BCpsUzFk#+yq5QN#VnIUIx%=pnVt!ZH$LwNp z>-l#NZY7I%wk~!oj$DevbwFm%;Z+OdenO=R7xK%n^zt}_K z3{VNBq8g$7$L{C48eC+csD99Yax7o&N|)<(6kNlFt z;ps@w+_j4vyVoR7@x=K{?t&Al-?FA$)#Gca6FPO&6BA81y?d?=N+Du9-$yQ`^v_;L zQ$wdi^FM3sNi=U))@hhcUhLe?%3#B$_Jmv^-OF&WOuOfv#=fF>CG{@8R*{NA>88RW z{9`Z8rUT-#hd%Y`-BjNe^z^NmzRXN-q;iM3?8Mt$f}Q^7Ys5;zw?3Z!@;qU+b>a-B zuXmVVu5Ewg;{(i|+ImV!O0&;Dwp>%IaA!^s-`6@Iz)G<`U_6GqtvWP|VUb>4k-Xt^ z#{F}(V^TC{u7z8^qcN){=3yxNv5u9DCwD4#bSah- ztdQ~6wXUF7pUcMD+A5kmcf$+c2m z{jCZ~dxC$+His>Kw-n~lWWRtN;YsuPB+c~vY;^F1u?-$y$!`Dtyh_2VueGCo<<;bi zHVfCCunb7eoRMy8{K$+7^{~YKyhsN9ZCiUiJ}}QpZuvT+W!tP~|1I%}HLd+ExpWBs zAX}|g!XR$?3+p$tJFt7ADX=AZs#AJr23DNw<^s{rBQ%*3?L%ESG=14Tx{W!F5Vl*P zxk}@Od|?Pa*$j1AGkcda%Zc)#nD3)}950wpN!e!la*Thn5u9<%4Ru&`)%e81Xuy46 zZ}827nXaRTH(aO0pi#-+=I!F+t%fJdlw|u@^R@2b&mS4B-tv-L%1cOad@#2Sd!b{X z+W%(v>&wDnF;Y)=kV{Gy+Q}HsHXC15#7w~L)gM1tKfa3C>OQwsEwMhK%F^>`0R?^6DqR=- zO4)cyB{||z_ve5v(+Z=Lq3CX%GNnrHOuYfpfJ}m-g^N^Bf93^*ug$$V)91Tt-LHO4 z^!mb}Il++c@ohj0aYEg-@(1j07n6SzT5Z{B(;EO?S1Kw5&jzK-l>yL}U3`rdAU z!0z?Ft_@OH@git4Tz!`CP!_-P-sR@Yg{=LJOFzmVTS!{(&8@UzJ}LBZi;~&Y{JGq4 zQfFj|eu^Hd4ENO~NEUwwX@ zn{)Jitzcr-&3A!8+_hYVW}i+C98yjh#8 zz$j5Ei!aMrs!NcOqT1{-Hx7mXoJyIPKq+C;YJ2u(ImN&$Ec|xu=%gz?^<($GSkG$K z?b7uP_MJw*H}Efi!=7d6Ggl?B1GC^Y|9X!D(~ao40{i5Z%9kehO_`F$Ti@Pgc%@h$ zfAOgF+b=UiC5;(Sakm*(k*oyguDu64dY|7vp)4R>pRV$aQh=x2I)CcpRj&W{zXqt#P}Ww@STe za3372Nc^}L)jK<{$W^?VmDA^(R=(H6)wic~k74JI6f&G>I{Ms2@BNd5%a4WQjY;N48utZs%=*G)_h(q#MB%yYA-b@%WsY zi@H>#msFwBV58%a&0u_R5V?BP?pQ~^TVvqmsfW&CMd4g4LSv_QJj>cY*YhZ4x}ROY znp)bR#R{j$x{Uj9XNb5=GiJeT;&vYFQ&ZCyGt`i-u{AOeTZHBq@fe987z}U~N2^rJ z4gSPaPhDOVd|@VQRy#AMs$R)jnL*5*@_Dpf^Zx12Bk6Tj+#}uN zW*@tmzEugdytw+D6L+sq*r!jpBcjHPT2nLsNv^@8KQ7v_>I;5&$Ec+E4x^!K-r?go zq0@fO>^ymkuWIzi@4^NJFTYGrL_+q5IuDMD2mZ~SD8bld|XkEM&nhsvmZDw?v99y5H}a|mJR28 zbNAh(UDGa}%I_a)BYY{JE;<;K^Av3aQq105is-WzXHxchbtZNmZJ}{q{WEl-itwQmiL8b z${u}FtF351OFKtMRn}(qjla@b*(caW>Mp;l`l?CV!YOXBw9bU|b+COUQ0QT4zRPh7 zb2)Sm)ohxj5h`9baBeCuxDwvwbmT6MZyCLk6f!Lf2ZVt|uyioM>_sZJR`1(R*Ev1^!4Rn37c^(i!Qut4*8woMy}1iz 
z+{?E)M9z}8%JdE`_y=sy=6`5<+P75oQYTaT{mJWL24;ICU0>|zdw*}r=+$wOrtTlBn8H6VhJA3rtnJYA*Za__)W&;&6>@iN9Brf@rmH( z$tCvPHFN!%$lmTvF=@LFZg%5Wu_*99yhtE09`1M9lcuG#g>>F}nFqosEmjVVi{#`2_?O77ufZ&T83t*WlRftbdf68DMu`TaI$se{aW?WI?nhun_>L`{3$cxT*SjA z9jSpbSYC*{M1^gEK0)oN%|hJqtgiFEPM=g?3A^3r;T0MTH0P$={$#4Jc{$HUdigHQ zq#{f(>{XrhuD3;;;wjoo=c34>mD}7u$1d7+Yx53FrrFm?%o6Q~8*KB41x)il8MyaLww}(@$2n8mUVS*%VKrUz_`w#Pot?)V&Bx((`q6jA{G`6gWfEVQ z3&fwDvzk@$M%;bRVH3!TYq(TiAk0|&93v-x_yJ}2aWmge?wD_E1sLd^J)^z_Nzn^W z4Ox7q;!f=dT~v%p+ms`xcU7QYj2hgA%DFmqd%)EBeb8qUYT3ywrT4E=os+d%@$m3d zG40#0pLgq#yShjylsw4ZSe4gb&qEeks~_4W zO!N+!d+d;~uD+A3DkUayb=2&7S6W6Q{{C@Y+|g>KD*pX11TK8w5V(2ieDN9S$A-Kz zWY4dD$Dwzbc%y98X5+4XaSB3nzPO6r3$y4c1x?MDx*GQ*NIn zJ>yf)B??LBP}3$PwY_^>eMi_@jRPpoy9iILA60}G3l=s#Zq(cy>v+Z0z-M{95}DGw zk0ohwNeW`WGhH8g^8*Jbx2J2%w|NHttvm0evPrG*uLwGGJ|Kme`9ykO#GrP0j4e5~ z$@)1IKL4;M(^j=*_kx((?#@Vr;bYE#;nuN-9zu+sYA1R`2^qn+Q&P@7O}7ts5RzBQ zdRN+=JIFqxlbadGfZKe~z%ZAW(%)_6ztn7EtR=U<=A~eNUF?7$7f6ljG)b;SvF|tgijwjw(R9-p#F5A~h(}ElRG*#0i7+v4h@v&a3-BqpM`oVo8 zK@Qp11_x!d!av5Mb~!+1WLPqHx7YW6AxZ1ketX`9jZNGAc=?@{V-0P@SQI6q^yB>O zCzcCVv59-(9cjn=8?Q_qn^7vu-CVi#(yI9GI@sM)3ks8NdCa}yLC<;hU#3--Z!OUM zd}FL>+ma=7jzIu4iig4@Z|z z*2yoKoD?`fltA$rcqqn%Iw%_6im?_CQqy35v@F@e#`<;7^Db9j&c`F9ol91i*xN>P zuEj;DKA%cZ!8u(g^v7>uJXRs4SaNYH-1(VzSftT$yO5NT{F;}?71Mu6xhFLiUI?=Y$;?u_yfb;(*<6Yqcl={eKJyHPrAxsCLJs}*tvjh!JJL$P<($FGtK9oYoKM5@oK`J z&Sk=cL26c$U8EE5-pLMa*$>2BJ6mqwKrwIY{qz2NySq+j47ut?69;uXqY zwQ*BDGAjI;ZL&`^IHm5~la@%`lc}na(bt}3lJmFZo)NITV=o-`&I!vibrDUd4!X&C z64xR#{b>d3{AvDbu(xl=OmsLSSBzWi;QM-{#Mcs;5wC#Q;X6K%{N)g*`WCjZ`B9&Ios#I7**Jn>dl|E96eO!OT@>Jy=2UeXy8o}rjHtZFg?9c z_ho0O{MHmp&F$$0m$ims-dc z(lJs+PplvNmnG(;y41`xi&pX`zCasNl+~tP2~vWS+gk>-TKdKzV=9wBGOIaxBSjl+ zyBiy~>?#q>5ad6YxzO6RbrbaV{)?8UQ!;FgL)7#rc?!jXN=ZMSEs2wj57Ewr2(NUi z3S9Qsb;|#UTH(HTWR)T=Po#b;Q!kK-0j?>n86f)8tCmfP(|t7eQH(6JJ|);28n zLS8v$zrd_p$#+>P4OnH5-*S3w_6{z~u5Zo<-Xu-uR2oIv@;&Kyjklj*)u~xFjPwnv zdkAHlKQ%m*B`uflGBzb<0TT^?U;*bA58$jZL@glYQdk1z?Y9Je!)L4`<&k&dO 
zc1<&md@z=dVZ(pFe>~PC2iMxw!F0pm+0yg9^AmY{Zatj%9{l8~N4l@#j*rjE@-O&) zmVPV{iaW_9q$Kb(nN+~|M6|i$&CSvx_}y=N<>&RE4lxzY#Z2HDsBFnuhF2m;?^jq& zYP6KEf7vnZC19&zq=qc!EvspqXk=L#x8h9EIlJJtkdAT#sW&HEklJ=s!RnH? zw?1qotAz`*D1@Ifhnyd-G(51v8)atp$sk<$NqfQ9`M8;7mxb!=i+V0Gq=?0j1jX1v zvNQ@JZbWuq+_ZW6rIcbpRPN6rT1m^ms7NqUpdkOY?jQ8Rr?1fM5 zt-ecp%H^Gsi57ptlU}ub(=9u3N-|Emj#6LXE=FF8DeOKxV|W_Re2vy^YL^tn%Hbc& z+?dW~y>oBTscM!pu0+3EJMw&jek}A=h4e3Ow+fJ&u;0D^b#0SqmUYg1O*RSq{wv)a zuY)x0PKfW-uVy#l$(jtM%BhN=lN zOgTm}*5|Snn>m+=Q7=9r57eZzl=N;s6H?90&J`#h_bo%z^=sr@pkdzS*KZf!Ho49w zGtQAZH@YF9yeW7N0arhOT@!Y3Ct$;lq)c+v!vf-Lzd|yPd70)=|H4#G z$fX-k@2frB+kWa;B4jYVSu9jN@~UrAU7yV!AxL_?Vr8acn9}*n58U=>3Xfmzncs2h zUj95K#i4=};Js;QgVdQp25uRj3s&23kCpdK2{T);wu!D-(OP>2vctvL9|#VN9L-EU zB_g6P}Z&cEv{^`SFzm9?LqS75j?m6Wg_nn)>&tF?)ZoRkf zbim=c-JO`JDdD%+j`DLA>BWNE*mzbi9SGndj7>=YE^1pN0o$^fF{BDQcaUJo-73uSJ{}B3Ga5f9{f( z)Y&oh39Q)b`r9$q7u>bpM3RGRGITABX)H+=^WIBa!b-^8$Jgky z+^d&+?#~<;n=p8>Q~a#qgZGX$0-yG|r#rXV*HZ@twCnO-+wo>wJ&I z+@}Xxd*7XD(sy}v-J7@4y*XlO1v^FCqDe9_JMHQ5v1u~DQ-m$NVOu~o=jEQ1H?;Z+ z^rce1M$7(dCE=olP~#q(!_vbaHkZ55M!d)O-b>cIxLM(uc3h0~KzEh`WcrQD*zAip zIi+qxA6ctp)v6PV5}+fM9&g+XvKDs7_l+9zgm(1D9+bKTkE>O3lk>_yk7@0?c5?Aa z!k2N*SJ#=`LQrBz{?}72cA^-aMFZO5x#pK)*i2{r`=KiDwtbHz4SpS4c`ilejz4~a z99k9_Qgn;)D9WqQVKZZJn4mOK`B2HYnW9m8T05_Wz>wm%i_A+GxgW?gnXJC=C@P!` zJ=Ok>rCn#{6?#M; z8H(ojHpD$P(r}GVe9SLs%wHeqh}{->ztD=CUaWH zmVX~9V5nfTSv9MUS8ik3#rz=3Ii6dpLD`1JTpqo;$x3+hJ2vm+gSL{pi<^dvRoKM? 
zF`W;li!>98G89jwf3zfCL~$x3kf~9)(?71OK=S$y1zhTzwXD%9A~YKQjQ3AiUm7~g z+sed#b6nOY+moX;$xL3R_Fm49))#KuGGw@!q`XmEV%s;LKJ^_I>&B%nIxQ_{b*xsM zL>ulYj44Wwn2Fo=HLA+glshB3qgoo`hQ@k>75+6=kh>td}mI=VX^Vp%l^Fa z_4YbFCD2VYqS4+|d42bY)5m8@1U{W=IZpxOG zL8dRLir-dSLMCNytK%!4qYp)F6 zK%ZJHSbVRQnfghI?TUk4P!9a+L4)%?S$cw+!UeU{cY^q>iMh0?k7rA*2cJp2)SPtG zCR~eb$0Rxp*QI`{Yx;aLHY)Ml@m)RjJ)58vH(H60C^Zq4E$>Zh0zcw$J^Og}d34p`uqZF+X;%%InPcKcm`nutq2C4eNAMr%jbUVJYU& z_huH?WXNg#TsQMrOG>yJAu`O%e$C*3>q=1SvjmaD@OmY|{HA04^}PY&{OL7h%8M!H z>M5U*xa-AtY>ke7J-N8FH=+4TTdDv4Y52Bg*TmwUE2nY>pM_koVG>B-FAmZ$-5q89 z?L~Xu=&Q2O$7Y%-cU@=#x$|}|vd-yq3HzHH3qN@j+70Cv>Z#ee4p`qJ>9Aa4?r@jA z8&i^C`-Dg3N>V5Z3c2*@Q{S^CDKDcXyTYrlMynr0G`q-$jh7Y(v3;`MdQXs#s^v3z z<%NVm+-z2#*Y4Y^7vAb|`pEgjcQbOiGWZHt6WPrZM+@YPtMf&2PG|6xo_UiUaJje7+zaX);&B&;Z-sPNG?r-c^gQS3*8-Ss-vbc|K}*~2E^LDcOOJ^* zwcXuj>z;Z_cwFIa{F$fsl5GP6cqLxU7xi|#a*w2T!(FIR4~CmhLV2tGx9~U|5^+kN zVi*HkHAf*a5l$Tpdm_({;$y+v%#}u~GAdar>8H`E15xiRL#wOAu#U zmmZDiSaFYSzksy5c4PAMo{!2cUfi3`-`L)e(ZDD!L4B#YDq+GyZ3)UOFuN&rFPs2< zs*AX|Xw$L6Cz|Es%yniuODN4w-w!c>*uJ|zX96DmL*%hQvq#*AhyhhiOOt0thvils zWlAnSbhs^YjYTlX#p`M~%loiLn*7PRVjqxqPZggea8(bxXoSvbD!F&N9Ef8FIaogYU+ADHpcP7W~CFs-&j0P{Q59Yqc)hzqK1nzqy%I!V`L_S+fCQyuA0#>H%7t{IQCuPUA@4 zPGk7Bxvu#~FWeV}dhVEXq?I)u+peq1(-==ms+PLFf2bm*c9FE#+c8W^&x`ycKxL{h z;)2OdU9BIyIkiiPb>|P_Wi2eE7ILliMn157s+vyu7=!p)%~ZpbN8yRK(@^1!D{ygU z8|>gVd1nyee!DrdfXS!bGqJ)uS}jAOveyQ8p2VeJy<4~U1MlsZM|Zsq4T&#>lY1Mo zCDny(x(gVD$U|X8*Tc8(NPAVS8~V=jW%k&G60Igbp(7U$ieGqmc}HduCS|7BZLf4l zve8x5Pgi%M)T1TGP?oH{*EFS0XFn+EcY1z8?%wjgpm4tB5AbVmh0hA4d+(1Cz+AKc z(4JWS=GD0j8D75bmlg5)cEvAtcYWS@Rcg!V#r|=GDt7Ut#WSNa`PwthJGVc(Wij1e zQs2Wct4$3(Y~?=PI{zs^Vfp^i$@+XdHr-^KyGEhM&*Tb;)u1guQI=ZSE~Ij6 z3)fp=-v#rTTPn9UYh@Vd1SFpgaq-$w!H=fK3w;dBwo>|fHZQoj!@OH;>&*`7t&E%c zGWPP!RPUI>^*noT-24LDrgSbl!pcc^Hz7-LYmRI8T-vob^{zpd&593lxf#BFWNv@+ zxs%YEyPa<{`<=3&s#MGK)rl?#w5@2&)YICT@XDts-|Dn?*JE!ua9oy|5e@Qvc0<04 zrDd;PlYIUa>*!Vf*w<|bH)|Xf?K*W1bJC>Cpr#tjlv1A`qZ`vzsg}bWERMG*P;l$&7z|LsMh(QJ<(UKNE4yB 
zx~x+^i=V5lA4Jz3EZ(Ipyp=^(pm_v4N6kFW@o%|W>USC=8B|P^o-9w}YqCnvsvSncKd84Z$-JMREOvi-mshAD z<@4F%g_Wxs9NH%4jl`xyF1sx2UX-=meJ*M7>V=*St!UHMPVU4T-3OGPDi&AU?)%vI zjjsV7K6QAC6YD0_X4QCx4?8J!O3LHe)}k0IvVG&i!&%xlo6@&xaePr{u$x@^+hb3p zmL$ySpR00DHJy8HmGK2*PaU&ZzI zfx$;92$zpGSlpLt=Xu?yK4q!1eB?^<*ud%*`NVVS_U!zEN5AxEX?8XR37tN!no$;a zxf^o*kgv@2Gev#`0a>ugHZ=Yi>;B47FW#Nx+Z`!)eKd_3N?6BNnzznjQb$_au*32z|A}P z8r-j{b+zih$P&^U_KPw-9bsjzN{z?bWM^-z-@bw75q z*E@BtQ{K< zSds5m?UbjRbE&rDch6p0I9H4l!e0Dzt#fa=CQ6Ub*)3>7u<}09MUVDmBv)OZKT;pt zbkmmiVu)?rOD>nfAm7821;om+@|T0%4DT&g`1WZWh!;ODSIc7h$|9F1;i31-p?i7l zsS%=y3gtT%zrzJB;+uj4BVHYAk$5w|%iKkjm(5?^=oOo=w>;k!{Jm#6*TpW|$4xke zMqg$VI*gg@%e=Acdd#KRKpQEJSFa)jzRA0+&NwPv9bYP}fN?4;eIv-Avpmk5mDL;U zyK&#=)dJ%z zHyy6`avwYpr)$)Pm%LmTO_-O@JiRO%sja8#q_CCb9u@xJqj7qqmeCQdEEZGt?f9be z@s0k0Q?-kXOtbdKYqq6G)kr=$+%b`R@mZEhY1|Op?@hohLv^7eTUjs2phQyA5)7_N zKGVcPF8A>}KOdkd4BX#II ztj5EgVbwit%$J_mgnG!Qf5m=F-y!?XLs0nCkBrARlRMMK5k-;$9WT?FTSDH%JM(U8 z@zq;c(0td@{{H|!K)}DUGFZ1aP`*^)-L_P~Dm)Qw*H*ady=Er>>HA_sFr;Z(J5foa z-+soMqOmI6^x0ngYmZ!*OxP7%asnts42}9MN7anuEN8c#5&`AYTs!1A<=S~}FOo*V z9IxC_7&J@#(;Ss$8nVb43yCFF77s4kRj6gpy6zitjZ&8K8KOkZ>+JL0u`1)NCx7|y zzl^VLtbpWn!!Hk9b&vuYCGyh7DZ*nF2&d7HxlmI*NL5RYo(|S_*V+Nf=9$^^ZfH=2 z`ifsMyol}a3A>%UQqL+EoG&RAOp@^)U(ETnn1T{Bj37~tc%3Yn>I>y$=CwA zhZ3_Dg*d|p;Zz-(nQ<8CT?0R|2ikuROJ(1R3@il`bbz(i4RKzsNBLR_5sSQjE}p?) 
z{9wx{?Rrh^1e^ua{|)p+^h}Tv*PjIDl8u9@J#ysf@{77hn{;iiHr@=j^zu`$&C8{n z0|YF?&?)NT6+`?UAHdve{Z(^ki?Z6)0LF8@pPW!0#AKv331JJE-#yk#FJvqA?PC(> zC5WKoc%8Z>dWIs|iVvN$<;b(3`*$O%0_rH(=3V6vO_!H>w_^L($6Kh=CeU)Z0E(h_ zI%p?;6G6AKT>LQKN@iBmCvD@1f8u>y_lBl|_RSp9LXZ(~4WVmdoChdCS$(Q_Pt_~z zpbv#wftv(lEnh0MG>&fCkFrn?3^JV!bOlbazCEKx&$9vs#gW~`X09r+o-~gJ{6w8V zE8i`5^rT|YY)UYh?CGmT3ze52u@rU&%~-|}B(i@k8ikG0c&5`(x=)@KwqpH06#AUO zWY;ZKU`GZMA9Z8M7!Iq+ZjI`)e2t;u3zu;ebh*(iodH4kGj+A#nSvi+gV0hP{vl3B zkl)UDA$xiyTaMuMrHU~K2U#0MesQ+=(_RE)AXM~X7y$R|dA-6+b}H(@s=HlpCxLl$GzRi)$Sv*po+sSW2rzVZ+N1Ab7M z0Oqyy(Lc0ek)n~cxOcsCY1JXtkEAEFCRa31cpTJrjzcY3r%?_TEID)Z!xenTSCLgx zj#n~pzA39gOpnQy>0XJS_*2kS=&Q77Eqo2ZXii4a6z#rR)3cyd3+SrAgHUN+GtX=a z*tyz)f8bQNjhHhL`2GO{r38GFU4tS{TYZU zwe;yKbu|G`XpLe{10vml z*1^x3i1~|;T|TNJ$HG^Og{HlfIu>nw)SU*QVy5`dh9xKp_3se*Q{3RWUKUml*B#1@ zTCCtkC-Nok!jXAvF+;h=Vzkl%D@>3pG$(EOrHBUg9(h;ei*5o-ddd1vpZCBWcffxY zGfd+g-bsS8wt?tEeU@Ikh1GdbBg*UDVdWRlvQZ+eIwn03oqi?@N_npjAX+VEC+r}C zlv^a|7%m(_CI{k~qYXzG-W;{|?~<}C5PU-P*7*oPcN*obH4Sk-(1CfYK9@ontcwHN z3cd1gsmkl$ChK*|F}o8mNI!ga`^nl)R3!TMp&v@;phL&(k{&6&AmhXYRrOc3G;-eR zx3zIUD|nX!qoGK=MXR5o@vDRtrK)xp z{Wg8dLM<=4v|d}_Qlp>ie;XC#G1U#o!eoBt6%mec%)l)!D6VjJ+q3u zMXzn`{dNIYYbw*^T4AN1(bqNTSU2$eRU;>Q?O0Pd?0*`B1q#q7K4GV3%p|!hzeww; z=J1SqKqj>O$~l$iGO;${_sZWI<2*3#4%iNuS>B(myI~;Qick5dtK%65vt_LuKz+z4 z6B7T}-Ik63B65?TT!mJVWP({z1Crx90e2de{K$Eq{uV3=VU2X$`Q#jp22E;fL#ymP z;pF%_z%L*2Y*syHQ+#RKa*=hBCZ+3i>6!#Paw?$Tj_<*0o+iE=$U(Z^`!v*j3vZ3b z=%s7EX}v$eLq?sxYsQrc|92*cUa#3Hy9nH^b4m}7>pDxI#UahvnHwR%-4Q z(QyOGdjDO#JFv7`hy5!4%=>gZquF!*<#C7m;iFjL`lULeq44 zzI-b-k^WZ}`y3Zmyq-?XHLdXT!PO;!N8w;V3iibd{&#U~gYNps12jeKoY|`ydWVsT z<}>W0vonY^wlWb520y`Xzqa6$i%DfP*fSvAKqk$0DPltmtM>Xl!VXEcdZtbe)2T6^ z8Bf%sgirvs1&u3Tm01*Mssq!jMT4*^Sh;HBC|IV7kG<1p#=)*%FPI~Y;&;xwn<3~a_FLR?hBARBvU1SkqPtbtXsFjf@#B%J8aAP`y*kM4*;ajC!D^F%h|{7a>(19QcX(nmGuZq^rWRU=gQNH*Q1{{c}u1T1VhTX64lro6i7=32%i_0EMkRvQx?j*yGR-}L)gMU><1Q!eEnajV~o4A!_$JT zE$wEWV823M1#y!SM0~BhOoN2F$)VpzLaGR%7cy<14$*5(t>eqIc!E=@z%N9wNVgosV5!w1TQ 
zx9ANBkl5P%=G9|g#?V=R=KSa3v!9IE3yg3PJH2vGGo{YY?1*Py7n;^}qo@wL&j)>$ zxm^ClTTZuRRDHO4qcQkVN3YLvvEaT+?KmHUagNSS6b0~VN0+%v@a`C->b~ofE7@tg z_1_<2SE*EXNHDs|{H;!iflG?pB~(p4xgAOf3A49WC4$;9Q^AOO6&3kZRL5V>{uMNv z>KmbERfHtY3!uT0R6TeRKvDhm=;CrDIuTY zBx4djMHF$P+p3?<$-E>DTweTRl0X$B?6x%RZUlIfVC>f(q4>7z55{@?^fAGVugRki zTRw+7f~n2QKx}>&A&YA@g%_uvkJbbx zJ{W5=_D*p4KMMsO?eHz0LVpu9ivK4(OX@g|ErNqpoQHWCF=~Q}IMBPlAER+7ttWcB z&H|_k=NFm8a<9nN2~px8mm8_a_JbhBIglXEifoEX+9v{aqE?>IE;Mx;PZc++CCUyi zET%ZAENZL;yib?7w5|rY>n>Ee7u?30YII|JZb19RW>5JCCXKji-Al`>1d-;nNS^{= zIXv8m;c$wV1FRa{6}jS|R5sV^H?S{mc8#D%O2EnURBRb)u;0}<<8BUHr!Z1U(?mES zN-P3Lacf=Toq^o@ty4_nxpa8lWsuN%lC9b?(Z64OlhLN}!B@WItiR8YX`bxstCs$D z-Xv@h0Fe+Hr=wCY@GLShk`ytBsa(mBR{qYjk^j{(>T0imp4eaY zHjoXpmH|Vt8OPtH8D9CM+yBS#At%{Ca6rB`(QD&T6M36|y(8Q4q){qco`G@oDxz!M zF+I1>8+86>`T0O|fnZlRjJV~AB<_od0UH6caHByfF;I%L2!b_@DD`MgWd#ky;hgPU z^qImsu6e=NK@1k~Br1W?>bB(HaVmRWQJ8h^>Vh$ZpAp$C~1qDESD}Qz? zjovq=fh4V3e{C<WDk zpl~utozCYXy0xV>XC~ahHj;>}4%a^cjYZCl7e8dsd#CyRzr*2oRDq54LSNEP#Cl>7 z4g}5GIb?)1pgU&FMc(QQ-J7+GJrq5`xspij<9gu=d7pAn|6$ZfUtvmp4}9yqi}$Qv zi+Bun?DH(SY<=l>uk(*tmUAMF;PMn`#yqqyO@YzP`s=hB9s-O#vvrmYvxcH-bL{(g znK1bk0~gEpwm!stFl=(jqt{LcOzptsGeRrv%NQ*kadFjI2kN!km>PDa5HFlMV+s+1 zGZDkIXIS8&@do>6{(B)We=$Y#x(80s%^n)Z#hT0V<2ytus^w{&?5p`Y$quPWE69gh zw=oRT`%=@bu8YDQnNVRmF^%G(ed<2k$zBl&`5%3$V?5x->6$XspK`Q#2|lf81{{!FoS%KE?VC1?4G29<6)7kw+$qLMpwExI7@` z#>r+m$QFvWMlHX@K(tw8$q#suCmcT8jjvLmF7 zXdf)K5hDwsHp_B+{97?wvClq(!md~l_!ejX^#%{uRh@xV?AAilK)!`Wzoe6usP{+4 z%eBtN^lH-@s_#6e!omVZvLlmWOF8`TB!ncK(av<=uVYGde5ZN-O#xPB<^6fjf4f(C z(St3L=YPbS8BB6@G}2`7AE8ZF2)44ID1NuMpVwY{%H0b zZR;2f`74zwc9Xk9Qg%KUTJiIC8fh&GA`YH;DftQK3f9b1N;IzR!eyPka$J!r_T0jc z-n_3l*yiukQ-oiA<0&?ruB}A^L2?v1DMlJEk=2-GfOD|Nc0jac`{EwW22Tg88qdiU z{)`9#KxRS40zR!3U(Dd)4lm_Ah(7%mENr%*4S2rKO@p44(L!cABb3p5`<;P=l^Kfo z_Ee3*4rF4b(!EDMdo+_4M!_L%>L~t^JnQPj5DCtZ_xV{CYQtjts5AyqM_&>>mlObd zwTFF|JwMC3;=4jo9b1*Vols4Zxr@vqTRES$`1KsX5xst{=MGj)Cr!DJ&@>xvEBviD z#ypUchvHz>gxT>((`zdp+3lO1y>N$CDS`fJ#FF;*u2g7*aa&){-Z?v?xRg7>2V-)m 
zzBd>U?#6t_tETtQc9i!h@&ho!05l^~!8PqOKO z7}q%?k7rlXn)Mu?G;|_)FEZT2gt&JNvPGCWnTCAFV;R277Ppjc$$v^L5K~zR86yT- z$kN1ynN{&;MM>=b^Ae<>7Cx!L9)lW808z>b8? zCQI1as@qKbW#oWhx*zmduXWJj4t~-h9wL-EIO$vp1{g~tFJt*HIM&-Vy;77gN-pXp^ zL$_BIeK!1+MKsbVC;kxUO?Gj}632}x*ne}T3~l;@DAH$CxARhM7>sR4ponDq6{N;g zF;+O1_4@=&XHPFgl*vVfE4Cu1VhGC2XmLe26@W@D95I6gC5sC8SOLN(?F{RS>}8sd3D1J8+@@vvHN@MHe5iauR|4E~?h{MUpzxBPjV`FI z>M3odWTn#pc(}yRopedNMeX@~N?rsEUoZx6=gCy>8|isb7ju0`M6*UGlE3YP$%`AbJ(d*D zqYp|#Xo$5}W*@SV3}Rt#x0(gX47ZA4Lx^WVU-kq7bBE{JM%=85g`xoUyfm8=Lg-CZ z2y01;0D=p~YXSzl@h$?u61^0yFOB!&51@k%o^$8w=D~B_`Y8y1jS|De0@^mZT#6n5k7~#6^;DTAiX)`*9x4A)IWCS=7+AWm`CQEO) z*hW{6)}<5b+_;OObX^Wz+l@mXYG;Nq+@3mQy6XENsVYvqO2|*7M zEUQjj&rOGr?tM%6bMXnKA!|hcV!VIW z@z__7cC}fkDBaTTiaGuOm2+A$u8s@_%SCtBDqKVNAP=nS4(--CkLF6{l)KAyO#D;P zgmIf~Jw&HV;9sMij23}w!ftpVJV?;fcxjCU)7q-hW>;F?u=RwlR*ZEyB0wbdO4BEk*pATon%m}23-=U4$@zZs6n3h6jQ zGe3|cK5%$okD0~`%L~Iu2iJ+k#|R9*WsRWbqKhC|ap65{;hqk9@zF!6uyqr;dyC4i zsloM%{$P8_POsdrfC78>oBE2}R90B5-4KKh}bc^z%r!fD*NkRP<51Z)@c#pP$^;+hVH8NyWt=jn~Wih`mQ&x*9w|$VZ8u5V=x&Kp@KnN zOCOk+K&p)-o$8 z0^?5t`e?>NjPk$R;)f@MZzBF(2O3|6H+T~B2 z1AKEHyH|B>|3V_vWnKwDEOrZ9ge1f3+9ps?QR4m?j+bq7SIg8l;sW$>R!#qvG0yVm z&wwP}B!5Wo70iff`A6SDh_)C037QvV&o_Ib(R+%n+b7$yr z)e;~h!j3-{=fTS8NkHHL_UYRP4Rc%xvTci1b-42JD7qqN)pe+yEMc zCLqxe5E+tFgZTh`O}b543~{rqOf_k3oJf@rzCplGmz8c0ej%Dm5WW+l|@$27dAi}WHVdvx+BqYOD%GbtawFRLf_wPD@~-B<;xH;?bauXooPwj zjKIR_2aQp~>4Csp` zoFf;C5(ELjW+1miHqa|@IQB3fxEmt;(RWs`2z~_1eFzyUY&UtIeu33bq+b!E1MCH7dTfexhPJV+CR z+SH`1p`h6B7>2q%*IUoFYz#DbKQ>zn6#Xz9TPuCngc1#yIK;=01RHB9xYMB9kYfmcIL9Lox|yJ(PA*S$s2#itQ0W;~A`qi; z0$;M;=yST0HqF#kvi34{ulbbGw;vTF` zu2H*%)AildzOyopd(Z|}a(qKyjl^NyfB+7M$1qdbEeQU1ExIGQQ>OM^vf zl?>8cFQsRmG=@l*SCku0kAqa+F853rp6LCqJ?y3j*x_e1WHw$^rj8pO6=XQ#)V;Yn zUk18s0|?+Cir?0@3^@4CD;t4GBVG2~BI&)z}!dIo(o_OTxsk63Ek z)LK{xXA1>ZA+SZ`BhiCVA^}=7m_3>X4=Fg~-7tQl(=-kX$@2ak<(27VZ=EvQ+?4hY zx9yaMOtPFS3VP&D&r9G^Wl!q_elEUk?+pNxCM0V2OX<}tw?Zja@KgLoO5M1AsAowN zuFAh~#ZkJ~IU{SY;IH2mn1uwmhSkmAiSafs!G1nT30?KFy$!ayD|qTTc1~c9C{M#| 
z7^6weA>|v;0SiCBI*KLxgcX!J=mwwiu_k-ao0iQ|gKft-#VyD z@QlnC;&^M1S(Y_^aEZs&&k+d*Ud!QK1Rl;Ki_-VN$?O7SiiJL6XLff3GX2Yi*lc&| z&-I9#NPUJkjQ*B$%0r1W(}Mp)}=|9 znk`C}CuRb=(JplOUL7O*#59A|m2!MyDuEUde_4HUBt%>Zq~2O5qa_>V$K0WmL*WBs zZHX1y9vSHJ`OyyNgYm00bI?@i|2VaV(uRWPb9|Obs zu#A$8P8)?==As9F?ctT!d!guO+6T9eGRviO8t@?ts?i6|mW@$D#73Pjkb< z{#UBY@&iHl| z05{W8qm9m8+L74y#BWtL8iIr?)^6i-Kvf57K*5 z$s7U`2Xi{fM=R>3*(tp#TIw=9j+#z#Mw z7O?jQzD!>FrTubKwwgC1r&3$sfrCj)@jh)kD3_C51cBw?F2j;u1Kd4{DQRNQ-kA_K zm^*R^cALteZsGBiuk-zci!g%LMpAgLA}NC$XAcz|mz5PRky82;0IxvOdtLxCflEt{;+ch-HAZ4kj8&5^LGiw|(869+cD=F1n})zMnK6Sg1TH-9tt5@Ux!UN=t1eJK!tmr*7Bo#qR@gy?GVJm# z)c==G%69XQWzL~-$9f3#%WkUx4gZlDHm>VA-zMhlP<)>&y^OWDD%ZqMB#7(;H)n%z zkz1GJAq`QS$B>&)N*G`B7ddX3b0sRf0zD%Yq$2@t5C4P(MQn>T$3b`H%?L$w_cO+M(WRjxBg|*YQa9S-2BYlyiEoaK*)it?ZDp|3Jn(X26o`o)tp% zu-JNuBB?rn1bHz~ls8Z1-O9~s9^YiYYTGHGL5hc6C4X99>y8lbvD^8p(v;~h*KIlM zzvzwN?nFHgzI-TN_$De=VTp&J9m^gr#<*2$!^t(wOmB^PTURU97M}{(54W}Eqb^`_ zfmUYJlBxi-*8{zZ1glwqWgCPZQl3ymW6mjGT(lvxv;+*n;CrxFjVqz_143;+n@GNd za6hq%$)D+jiZWmYmgzjiM1)Qlqq; zMk*T};%4IT)zV542_SH2qF`b1_?W#ejTj?+zvCD9UZ7E#S*rc3y-BdEr4&L-Qs5@I zDIR*pkcutqMHB#WQ@UOv)cp(DQgCLivnkjuWz^zY3zX`mn}t9Z!5<~z77(VJSQSeY zq}c2#4MrEO)*DxC3{94C{>Ed7IBG8c;(8#og9d9HiFSN02FvqyUfu}+OP|LLI0E&f zf)}I=kLRotuKtw0(UMmDcci%sy4f5n88ULh)AzzNWS{@pat$u{OJ^3Ff#HAd@=@~e zr)e+cv#eXy+tXwp7;$bmsNpTjAL%vBh$nX9U& z>)NUGutrAHFFGzpxLga$cHmc=<=GiR&LHFi_%ti1sbD|~U5~KhtJJ0lQ2yrB3Mx~Z z43L!R+X)ze;v_epIhW3V(*z;402Z<10 zpCEicK@guwtNEb!f~oBmOcPHHx>(8FJKnj+r*jy?lAsQ)2;YkZ!(D<&#${TNU0EiW zzntq0iNdyXwJ$j~IP#;!gax;ELD!kWa9pv0+dNMuQ*Mv<83g69++(^9HMK59#o*1O zZ^nN`&8-!o#W`9rhL%v1bdo0u3%Oj6+}CD6v<*At_HA z%ha5@j+xfy&f=f+ZjKfUvaX$F3Y{b%d2NS*GV3Frda2kLHL|SB7c=WgA(#Q8?YAT5 zD0ughHO7VDi-37S=tFQ|%QH;=jFwJb1=riVjljjY9Y|GSp=EU;O7!Ujf)J=pDQphe z)+5^`(KCDnu%zFiv1`h3?H&Y<7rb$bGJI(WoYnHcBjpw?>QLuYoCMPN)+)g$vrbKk z?lO$2lHB@J9%mM1`A9tt6Lc~;~Ru;1F`Fpo4} zF?Guz0vBfY*PTZZ!h5ugPd2zX);AYFJ=w-$g+!%gMKKqOQwY!cu@g6qx^3Xalv8i} zu-+5Q`#sQj?A=U?yyr8p@`>|PX}n{gIL*qri!6VHDHtOlNZuFP{kC&l5qH&IqwzPf 
z0LM0LXnB<5tZ!WqVcXjynh{$9b^f4!uXvlrSnUY;GZ1k+J5pA-zlh#uqG!aXGt6aR zWgRfx9K6GP;`H1CShN)}E{vg+Jvj*YM_JTaEWx-7@e(nVPB*hO)pP6O^=YnTVQ{6N zR(q_UX(!NPzs-hfG>X?U!1mX|npU4jjXKu(>a->(l1rn80v;zhs|!XQBrJ<1s|tby zfhz14NEmHgYk+RAILjo@qOMYdqH2^D|nJ;&6o9_opv{ zIS(aLeAQRVbaF(lX(R|;5-|-z$7`O5;2;$65!hx4YwQx`E~?Kogtrg?f2mS5vO`_1 zBBx$91S(*0>_ph$)Mbne{Jbp7?q-duN1ZJrIf9XS$n?)1cy2WC?%AWOC6$l(TF2Q@ zBvk3?PRWf8Qf=Vj5hYb(We;dNw{*A4{UN)KZ!tG7BPMcOG;?zoLVA{Lk+@wmkzpoT zYE>i(m;Tsa@Bgt>xqS+ZV^Y^7P&lu9W2n0YsQG%LKVs{_GN96;|KXMD+zW7vzTozI za12aT8{H>)$d$3B^{6}sqEl+${N$VjY(I6uZ+6tgpvl+|kR<01G z#I%65U1`hD$9&oXnw<37apDN9dh@*Pq2VB3ln!g;5o@kXQ{@=u0O&w@1E+B>Q&($r z5y{Un=Gn(ud0i-`@6zF5MRr7{zQ70mkCpf6i5sDh?A|9l(1-MdMl*l&HzRS7jY0LI zG~ag6MxdW#yE6{J=uxgGt&3)GTRdELemhrTspy;}IOJ6Jv=hsvcdc&M&?FyNgwlbr zwoSM6n^8F`$s%zuL{c8B z{i0KHiT9;2o@c7n(~v``Txii1iOIJ-(<3-b*97?6tAuFKUmpv$!ER~SaLlFuoojQ^ zh=?!N&G;0*7pNy~zs@(YuRW3n?V?%#CD}qGFR-jc`YM)znqVKshScZE-EA#6SB;@ z?e4C)P}3+xEcCmqIpMv|#nzSFCSx6HeBZ&C6oB$cz+%?u*2YbBjhHj*c*~R%>A^l% zPmmA91~NK(4KDS))WZ?gw}WO!52H?>JXhyd#il=KuU7KQr6Za?HIlYn1<&O3l>kNA zZk!4Z7h80W`)ueyUj~a6TITH$Xh2_Q(+edP_P-92m$!e4qeij8FSm0`)s9D_Vk>`3 zpm)iY38CdO+}lc|Z`(SET~Kj%XM@*GX!d;+E<6OGOPGu8s^d@7@YEe&5fHN5Sg0Zb z0yUZ?un`@1S4Fdk;``a&tIK^+p$w5^NUn;@nNnJ7Q#qBen7E{bpNEhIJc<%d#MMju zi9-iJEVtg;mEVF_zis0INNKmXnXMtUa0;Qu!f6#~b4QPU`pM%@--kKLF4u~B)^aIlCn1Wn)RHjCHrA+5_v z_TL$R2%8VjLm$zBod#48Fkmh&v{8o;(g21hGXXp_$h|JHXOhLrw$P_=){suysWDPzLY5dzsm z>-2<0bylT3Q5%Ls)j9WAt`%R#f-{kc>p78c)DIp8QV~Zno+YJ)_FF~fRh;zd&+&tD z^_$F=>S^Gv!XItS2lH(HqsG@*Ima#j%J<+M>r;iDA4PoOjzF&i1>%cEN-OidT$CgM zy<!L&>awC11RK!%uDuu)HJp(i91vqohQFPZPGs{V3=yrRuY==Z^Y zs7R#TDJI~RKW<#8*%pH#mAc&MUs+Ciz!1-2@u) z+ES{e7#vn1^7T1#Ruvq|nF4Jv(tc2Iuyl^>yEmrrtdl$8k6Z|%m2({{s_P0rd9bpF zT9NY|bws-Ukwocka(h0gs24!aQlu;5VQ1igHX0N<(XZBco>3~e``aCE11Gl@sr2&d zR`yBn${oQ=l$8?B0U5POQP*4*-tv_;lcXB$b>}Z57~CIJ&^mp2=iWC{mR6oG@0pqS zkUCZ7^JF(vFtILSD*WUlQ}6G!`xZ5|CA#=u(aV#=cSdO+h&43SXV_-ih!SwJqWZQF zSS>9P#F@fVrGl%bYc+idKx5#^G>A)%5zqUR$~ucg_n?vI^Uw?^E?;Th8-hmw`+7cl 
znPRGBfltTSx5jnVvPoHu{^E-v;Kgtqx1n;}$=8%jpm~Rmr|=d>>pJFLA(-ij;&ZlF zdoT!-Qqo*Hs2EC%FCR#a6Ec0*iQU_3KrbuPY~=owIbk3p&WoZCqZOqk{$}7|Mpioe zn3gd40OWTm!iB}1#tR^SqE6HDBGVTZm9?bNO8 z)FA*tG9KulH$8g4bt9o7Vk5m}J8Vru=6rnX9r?(}^p#(}N!z#RGZCjT^JC}4qtU8K zZ7}0@)-0|f{(-RFM@*?0O)ymWIhdIy(YNuXaggmsr4CgnusCn)$}!}?s`_0ebls~o zDC&z1lp3y2>!iN=f(}i2eq?SdwVx?1?0)W;opiHVOr}bdcL)}6*nUWHdWYCD`Zce} zXps=#O0PG!Bem?7dxI|8+mqktk_Oyzl4O$p&E5reRIpzuIp%d(d5`@!E9I8qRJ|cw zs6vbVO=f3K`cnm8eFFxNuB{gbt;cRoJKnE3wBN1b@pZ9mRUvRW8djlpmYfO<4CIMn zlz-|d)B@yOfhNyhcI%2p<^byGT=t$zPJx~~Q8ZIqUY?gwvQDlH4t6k)1O+&uZ{tBv zwucLI%c(DEp`~$}jA|au6JR5s{po}r9(>Y?c#;g0gZcRkJ`50! zaV_M7)TBIk$7?8o0^nGi=lQ-!1BbAk|LUMmFEvm~{K>5ve#vj2YKO-X*BcQrpLr1n zN3iI16E%^BLQM+WcCeSj7MY7@wK&#X$8QQ`_xnR1M@Yh^r=(x`MoYSauc>dRQWD>J zqn8N?ddPbc{@7}tRS=z~NX$KSJegS+5^@Mf&U`YI1}cX{qc-+;V6ODj4K~E&b8ap% z?d)jlxyviPZ-#=40G8FAD68o!1_dim84)dbtY7c}`CG+|%Q8>Tkd+!OKbG(}l~R|f zVj4);#2`VKwP9+LR8S9{@ppTd1yx#c(31K!A`Wu2>{=uP!1NWwAh)DN+hfwq8C*INE;un84I#hI&uTcm$KTtp5yyaAP`C$7m}w2+W&{RCN_~CEUplU zORh4Q;-nu$77NOW>86*5I*=BsF5VU{l|rW02y?B9D3Lde2vm0L`4-mHh&m4H@>Go(3P)0kS`cOsWEu!L#9oKLG>i!qPm5g-|PZCBE9hf^Ct0|*gWppRs%+@ zrH!JZ*r*8?FSOKnVy1k^vI6bg3oh|7Vc@MF?uv`UMd8o46H1@>shnSJWJpF^T08em za=AKNc;@;<l6DImR|0bm?EIij^sF95+7DxamXl%hpe#PKZSP8~c0x zRYq!iokLrB&B*`HIpMPiaDX;0OZfC|E8NFNT`t&})4egZa2j`3AP zX$UDaWTApcWbYM|FJvPRk;Xz-B=v3!JRsyXOY^ktCwo-fU6eAW7 za8R3pvZ@#J%<_rh`@zgb=OCp{)Co5LKT}O=I~Wk+G^MAqFvmm1Oi0zG*pAz;i*mF$ z460?`?BFQfGh;^?y$j;dWTvUHYbqmJBbIr9o zyJc^P3B@fe&f)JNWj-rP2K~^2?3-p&GHhD#WUUbsBSG@Utzt&6k9;Bmmo?mqt<8Lw01)xZ>43yhzI&bZ()oVpp2~YwcTJ z8Z=u8jLRa^vz`DsTkb9YtuFr|zci|fW5)42i|{WZvgpjCE+h^@l-k~u9QLX~Da`Lt z{Ca&%tzby_vi0u{|Frkqn*Xm0HV*^frUj1XsF1T{Q%RRmk^(N}74bF`n=>TD-tbUdCn|I5_p$Qp)9>R)t;qRAO4+gfg^E<{G0Tr*G60k4DG0(p9Ua7zQsx~w@lR< zl)MNO^s42FB`qFk@o&hakN>FX&5kln+3;`a8{B|3fR~qW+iNVD$N~xlpbm$ar>8g? 
zi`zM%S^yRul&zEkzlwW!_I`VK20!(jmLlH*Jcf;c@W=++L7Hk@i)ae^PcZm(r1iJb zp)g8)4JfT3Z5TPdJdvfcYSblr!h*_KGqV<@;LME0-?NiwZSI ztliz+!>wdfvrCk6zKiLG8{IL~A#dTEP4~h7RnjUT{cc^qi_|?I-nL#N;u{l~#>-k= z)!Y$w$L%R96%P`{zVob?sp@L1mSP59QNSaopsZz%%HddH?*myb@K8PGu8wdn>sYbRgfRv}a|NYz z6OgpwoyvLYJmm%}v)$73@CNK)CR~iLFHlBVq+l52jOBmzl-o*c_tVP=xF#SdLgP^) zZhX{eWFnAulGFSvj+4x6*nkp(qJ*(!U!%ocihVLM?(3p-9;hY9e;ZM(MFcP=>d3dR z>WV|JwSM#Tg_aBI*%?P|6DbGPj1gH>w;h>3Th;`ab!B1WB_VQ*UegL!|R=?qe<3a#Nvdh(z`6F@DX8kC}QqzAsc5f=g1KP zByxJ7E`l8I8VJmYXw%WP76fSB9k(lxOswdrdtU@!Z?)&W9};6b8XDJD;ahk*4grAX zvqXsfFG>h5G)XkF{98w0ycTC2f4J!WjZ|D3OWjcanXmn{^5hlcr!j_8?VF8Qa9I;` zd>Cd<&CXRWS2-l%M7goQ*S4fKKdWAMWR>tu z#w|9Ss4f&}wVBc_>9)tegXPh2hAUbha06MiCd#KQYSxC|XY0oLmpBmH>5=wl%z+d; zf_TVO@R=W2H1^>V2R}2kO(AxzLroNxE|^xh^Ou>(Q+FV6>JhcI)z}rLB5psG$)Clu z`WCds&_FN3NGs+ZLT30lmQ5=jD~me%^1WrcPX$GpKvDy&B-{qKu1(}y8-ofxz-u9Q zRskH8ZRZ-*<^Gquoe`!de6R=V4+WSdYTJKjfm>J$v&9Hf)zzmD#QsRLfJs-o{oiAj zG&6R>1;yIug=A&rY|zA4+UfLEonBsnTs8G<12D-a3Z=&|P3wx_`2C8Cy1(^`Q$PW% zI{2XYTdgKJg?e^`%RcgY4ezBjNP!j@@n!ANi&q%bOECKuMh4scHDkVgzbIUQAc|}t zf{x!3{M-AjbHRT>a$+DRz7MJ9x;UbUOr8xMH3tz*X6uZoH$v!nP4Z}Xzq@DMSBHP% zpu8#Alz{LNasJ|WoA&BrzW8xuzE0Wl@-Sge7mJME5WwgaU6~58>yi}UIYc0Z)Gv=+ z(ew&6Y1n;TLXWMIg9}S)9~h}frV^|6xX%6sMf||ZyKoC5%{^^t^L4$y1DG^iceCNyCk83X3+BfRaee;wcM-JLN3m)T!Oe1tY7y4 zev-W^<$_#X#{4;2$(9AnUsJF&Vi*r$9o2ljdfhuGe&8&eNz?IA=%@GcM{jFaZgQt< zyZ&z89u}dS)!l0#7`3JfCOQfQ@Y9TK1+c5Qo@-8CK_1Y(Y2<1kRYImBaCkLx1TCFI zdi!ZcN7#c`GX|7qF~b)NOnrTJ%XLh&xh(HSk;LhH|jDI;GJ zk;Fu5I|X}m39rbALbJZZKI-Y$GZ6Q-H3ikf{?hHYaWvI?!#J#9_K-oGNpYb#$QBjuKl}J-Q z-c#+z|E(l)VOWH`3gocfmn-+pbK$P7tcJfToKBYNFS&(g+ol}2(6teGQq|uMgtyRu z6>JT^x8@1FF7#Ej2`!X6Qs&oyl=n**xvCXRto?r!dEEn436ARbKc|UA{m)a^Sb9uD zbk@R>WKq#OOC;d%L0-#`Ox4iJ z=HHB^Hmz!g+MKdx91K2PhcN)0S6B{SCv_Sw^gfwA?q_=B`*|Lh&kMS|PH6Dup{xmH zA1NtmLLg>}-Z=$Ig^bZ;b^tx;J19`;rV+NEa!9375No9F!N1huTGyWW9m58d2=ajL zRn<2f@A<5f9gL_mH%)}FL}C5gONT!3@ZBNT=Ril&pT&8Axtyn7_`k}&>5zZ@hMLUm zV+9n&ga^C|Z5Kol3V17cs171Y;{aeyzY;WYbUf&85A{QF{fwJla?N)nc_1D>nhmCt 
zx`BjQF0bi~0!0|ao<-)2RxkUqjKm;i+BQ|Z>*uKTQwWxj%d|tb!rs^Y{O;GbdjOda z2}J@GMW@z3m;aq*$Ac(eY+RP;UlQ58Zb&xZjP23#rGhawsc}q2H2gTv(H^_$)@iqLZ6+ z@$J*gn;uh!q25jDnFPbMg;#HC3DX|?NBCA(af%J?(!e?y*Sdhu+O#9Dvl7CRQps&o zsR#BOg2FB78Jz^}MWIHnP~y|vzd;PlFR{3`hO9GAF@|06wYS=rLJ&Oj-J=AB*DowI zs+Z$Osy`h|NX<`yA7w}ioU4zysuvK$T6d2Ax3sDT+eDuVUGh%%*u(HQrB=0Ov$}Yd z$u-GCM+cNqOS+7^r`jET4k2vh>KeB`=kvUDDewdO&Ei2HeJHn`wD~Bjw>D=+* z7V3_}>J3?HEX_cpT_H9`4T7rsIq7duw?#HwwSelbBYuvaX|B};)4``>^K%}8WQ_^V zljKd5Wr*g77zEL^#-KAV;;sNI;G;|j_~2cjDH}s0_0c@cb$HT9BR*l zrFud$%T$A5?(PQQK4{NPMqM~r&&8?Osf}gtW};Q^4^Gi;Y^LmWGCnjXDsTah2+d(# z)lLDfF1?QI^|a`0mGygDVslKwPng|w}KwjKziAd5RT+8E`g6m z{3n5H(oogAwBuv!=S$%RkQRbJ4&?^9h47og1=~y2IF3?)pic0f1CH_)PL?ATa#HI) zm=F;g>A|bNLQpay66J1Xm=8pHRs2om;rEUVJZAhY=nAg|0E-o(P9Uz~Ta;7S0#k*p zs~JKZ{jOeFlLa@ER+{@wG86B?7~mQQE6V-2Qh`}pq%4ZC`lB_TIsyI@?>u-7MsTCa9HO3Llxe(< zm(BE4JA)+MTO&HFPYL#AP;K*A9^2>aj-Y5~Jf`(4ECU8;&v2N!3 z^dy73*t^`8`tw7l3_Hl0|I zucRkx;al^a6WHBLy!`uG!*m&%r1$C*Kst<30k9`fH*FGQIXBou^~A}eUZ=Lbdlv)* ztA$Xienvcy`)Rv|<^b&*aWmfNSxh`moJ@AC{Qa+aB$b4(Kp1bQ>+1w7eWc?%Inz7C zDVIzd%P_+MlZLY`f4|7SfKAG@p?P~L5qS%c!88l5tT@OX;9hVVI-5R!!QJRsemNfogar>BA`yo&mjda1|}@9Pu}XW10aOtqa)1)M+?hl5GGhjyfQ4aO5CdDQDTbUE~Fi8_>AGkM;Pyb zUKbaWVKe2U)u^OWMtM-~2+*C!e#9bvCan?BTe2KTyTnvIw5V5Xx)@=ZY(A}{jCk7Z z&Y>XCG)A_5p5r`s8*9PEG6YQMXc2ngq27@|2)^@<6IS+tHHt2y(vMSgZ)BnP7CNIK zER07QBijua2KpVwNZZIBd}gRu^g87fIc{=X?8Fu*Th=y$FA)bQ{3cvQHAjukiD^zq zEd@jxyf!bjd3?#lq~@zE2&&iy(2CT3rK>g1II$g~Ta2m^DN`7G)vCX6Q*@s0Hk zED>l|C}lV4<|qhD6qTA*MF;pwIT7TJTixMm_hjAL)*5`Qnfh7h#VcB0LctOFT3ue| z^{m$w3FK*%2Uy3<^mAw1CfADbIw z4CNu>-MR#5KPb{gG_rBcV47*fKA&C0yb;93pzK5!auVSI# z&qzk%2;N8BMiM~D_CtI7_Ch2zfwghD)fuw0l~Fb4tC4Nt^K=!${YLFXw=zRB+zoOW z)M~iE7T~scJlP05=7CArjtDezwE|>FlDd)jSe_<0| zpheic7hPTB#@`81lN@Z0n49%VwY={cf#wXy+Ef3&-5|mt4}>gxayqDIVB(VX0Hh)$ zq`Vped8GudgEhR-N{)e=JPZJTmwvE}aAPx|lc;K6@FBH`c3f@~#rT9!66 z5q8(-C&voC=|qZ!hY}*7F_MX=fAmB4?FYSX?&JB)FB&W8@1O4MSBKf!J zhaw+y&R+p^&tr9?z@!HsE=ZF8nApd_)3PAIk`viM?WL%wbe*byLB&xlL(?~+Tc>FA 
zM$T@2G?@~AVjPwHx$Q}u30-+b93C6!##Ch(sPSt|ms!QTYj&i&WX;NNEQp}7XTlnJ z7@uG8G=o?)@>9MU*3GX9^qP1j*U5iN#fKYmByR~aLrBNUD^sY@ zd81Xw&ohFOMWVDAfIMSGAZ#`#MN`5vI5$VLS;yCg*FwW~ZF~+lJ z2J+CUw+!n$vp}O0S;yl&?POV>{e6k{CJ_H(9{2!bG3GY89NAO$>mv8Oe-0i~Ta?}U zPeo)#HGkM+d>g3KW?-V2DKb|}D&j~HfvY1x$Hv4*J(1nE{2qtVR5HSR!UXW?>^LVe zgorZ_&X1ZMeci>XlO+3nOa6?xV+CkAt1ekJf{*6}B;5nxAh+fxR<3z_laIY_ou>i^ zgCXz_*1yn%OP$WHt4Kgx9G*=Kr$fRaIflMt>jpFcaM%;0M{Xrs&g+e(SnPeMB{hP9ln`8Zw2yzG zOFC9Meh=nRIh}93P|^j6UOU%fj=0rc**lQp!AVnf7Z)ZMwM9_}kgqi%gO+o)K%h5D zj=qab>1xc`JeFwmo*;CBC*xc1Ym=^Yw5Yoglp~WNg;o9#%2an&?E9KnozuPhUf_N_ zDj6<9=Yq+D$l$`S_j_7H{Yx3?Ry&0QzpiaUc%}GRnD>CC0eH2*V1h^AC#8~;q8Ij9 z2BSVtvV3T264=y3sh@(~f_bB38%ewHE#pwP&|%={O@G@%FwG>wFacRU$p4h!o9V(6 zqLfZDye)jCG!s0!@*!K$`u4?`Hk$1wp)m_u8^Vv8B`Ga}DN@(6BIn%UYH+FE`x^(D zJNVsqGYUOY)C_8y_ud6%k~Y`%wus0uL$Pei{m((yX&?J5?dhV5mX-4UL-pn$lvu?P zD5ygk8tQJ8O=MOsX$YR%$ly}tl_1H1RHieEB-nT4csXM8)O8luJz4`k8XrO_j7+89 z*7c+rrCP2~c67?IlQ8X3*;6L z>z-f5g43yNF#c^oF>7#p{|Hy)rUMURz|h~lQwO~{p*DXWi55)4+onS}dm0eR{a6V4 z7DT@XkDOF(OcFV|yuMH;+ytg<;PfWoyBkHytV4=@`AnEWo9hs~?ll+5m~TwwY$&*H zX0J;<6&b{y?Cex9+z|r0Q}40IAL~5&Yotzk>&uM$Xbe^(3-rrUq{Iob>&bth;5}VR zVKO9Xd5K`k(RS`D7clC|CCg;CYt>)`AQjstW1UDzVHdu^Vr=QI#FCDI z<)<0N{MzU?lD%z6aNlRz7CGCy{Q!7|Bt^eimL{U9glhpVic%7lz+qF zvt#C7n<&h|Wh-oj9#x59O(BftgOK_GG^m|#NiYdkKq09L-Q&J|!&H7rBqMSbqN?l| zftKDyUxioXgA?JZCof5QGah`xPXBKOdV}l^nX{00T|9(QG@%Yx2!}&W@7&3-or5|n zs>Cvb9CIAVAlgcfq;uizSXB$x?ixIqikj}=dTS>bvAEo#h1(Y8um%Agg3}-wQ&9Hu z$%p(Dj>MN)Af49$5<7uKS6i`s+&CTqvZ+#aeW8sFHGOJcZ{DA9uYhb#Ut=pENkx%X z2-IJYzXx>;eEVrRic7NjZ6)p(JzLX$CD;h*J@53>T^u!_G3)B&Ye}oge8(J);q&tta(x_J0xcsx5I>!N6+Ox{oVQ|uo2Pr9NDiA;uQ86y z^D1^d09me9s$Y73p>u{U{y`z^;iFE^y+6S7HU^su-F@>g5$DB*J$RqcmnnAa_u$PR zZQD=G>A>#NvSfyG+zbQA%lsMu(VO4U4tdl5Lptw;L9rR)AA3q|AIb3~ZNS89YeOM( z=94`jTq|_+rTq7ZRfGL@8C?-1Vg#)7DQ_tfFN$m=mJem8+Rf0Tt5^ zE8)kNWSKb!;O;z?G`Obbj3k0w#aY9t8CWK&?+gOO0v)00S`M!~LKrCqk^R-Lx{c%m zi{9IQ6IKF9nH^%&8W8NcC-5b;^wjS598qxG6%w#FGc!Sl=)hXHcYZvlGo#%6Mi^b5;`6i 
zNChYk$M#O|qf6j*kPNXY^`^yFRJzGRd^F6pG_rzH3uxoBr(^}x_}G9TIf+}HPlVz- zttKMQEsuptx*2s=pEK=|GxAJZ`U_S(aC^3<^o5`$LPpO3`?dnwY@+w4vhCu`S_V-iVFp*oEDF5JI4^EBgVZSYSloGpou^8^Wus#fvY!1i@@MqV z4?%cINg^e6!{RGX>1sPDOm(;|l%I;$?u-J*fS)K9$qOybBw4IEH~~_Rl^yom8I_OJ zCDv)z=SN0RpC^h^OyZjqGbovTMieJ>Ps1vl9oq=fm-v@B^1dfH%Kv6Qj~2&#a74$D zdtV#g?{`h1P33;7q5t*;t_0SjDBAGx@t8vSYA%#V7vuay?1X* zWTLx%xX;0WY=^CDWAD>c?sjR7M8i@TyYB3Xn^XD{59?sA>$eNj{s@`c@=aYk(#mev zdJx(dI>3dG#3Z~c;`Ip{&iM^V3UO-=LJ_iWFtH+k;Qy%stp_*)>ShVGYOP2ZXyEw{ zdB+YIZ)4|ZN_Q-5D4mw5Zujtg0hk~9Kwq0N4PeWF6qHT2BYojyX%z>>NNd@INh`@L zjA8GmL1*y+eY?O@uoe$lR)3v7A5ivcmOE;NSz6!|h-l*6P2!TDxRLaqBgaxG?%?#7 zpoY_$Y@hV5-GF`!?vU87PDk9x;=GLjz*9CPJaJA!#pl2eQzekL+GnJ7<)m0moMe&> zC#v_yFAQ32FX@;ZD`IhQIq3)b?oF5C)gLyH7-h64j9ad#+|-VP503Ti4-^MEq}pDoZ^Oc z=cRJ54QLUqV+G_FkPP;J3+LvJ^k&~DANqxyBH>#F)pRsM@fXil5qduB1IY*CiDW2c zJX#f6mUx1#LIURPj?S6!MXR<^+sD(ZD6!;gS&?Wkf1DMvxYw5#xr{_XSk(Mu)F;1Q zdJiIaye;_Z`?mna1{-mz`f~K~TZZdpcA3O1f@y9@Y0|{uMxU|92gA~Wt%yO(P2~#@ zMbCr+1*Bd|;i*la#}SGR-Ox_W<8|hpH*YIdD%->5oz`T1Xb@K0wxDzE5X^VnNhJ94 z`Y$F=s^86MueC1G8qmnnsztpp3%8n2|3Bd$XxpB?SeSLO!6(|N_&AN7dI5rJWMBgJ zZZyWhdN%=f!ldcQz~5wjlTgfubqAlSHI5n`&@Fkwik;%3f&=zW;l1qZ5?#y`)V<(5 z7i%{C%+8#?=0kRMe4H)hx{0s3Vq{Sf?8|^v$Qx&-2GAV=1t?)S%|LZY^dwbCdk()47m0~-iwisG#F`V$_MpE#VZa1 zaBRpK_!f`3&`sL%IDqBYbWxXj2Q=e8)SZNZ|7PbcS&3B8$Y_F-GQ@m(RGI5qkFA6n zAZA7#B;d1-%*Ov{b&Ff$mbd(e0E|6L{vDdu7ck)q8sZ%ad*s>KQ71kMiMdG4JWmuR zY*iRE+-dzY~pb9?sT3S0?y^t~3nYzJHMfgH)OFHs-O?md% z8HSc~ucC$qD>LU8a`e-z=OajN_ThUE!NQn^3P))gY98nn>~OO`%tG%c8qkr?@%mSt zp5Ym3_R=*JeX&X_@Abfz2e2CJxjKlgH890MpjZ}5TS^Mn%M9UXWm@v*HMAx`vWNWW z1QmflhKJ%I&_jd4Leg4FBMO@+U#BosT9Q2=Uf4hBKBDIRX7sIJM;EnS3T*8Q=PL$m zWmyNMFl@r=acV14X-7NVBD6dvcv$a6516FxKCOn4(p;9}#00qL8)dbO` zJn}~7zj9F9fIqH$Z@nUaj0d<+?c#6Is?MTw+I}|_{kDUX3CjIpxuywmSWzmqvfBQ3 zH=euaIYRy&nW#fQM~A7(*Lppa9$ZARV@k`sXH*?g;(U=6{-yrG>L0JE4Bl1#~ok#E^XpdUtG0MQMUIq&eHyhMz~y~<(^ zVWgU`p>A563SIBWrrVzd9qkLk_x$l={;wH9ioj|8_0$?g(|D|!!OgZt$YOJ_Qy)cs 
zzuaNps)PreG<983i*?fX5^6muzYlqZtALmGWPI%=&?J|XwHv1-qm#QD(X^mvIs6&3AN_)65;t=@+b3@OhFxcpx!~}4O>#eS#Ir_C zn|?cgNiy<8Mc&KYX=u8vFdCrPfM;C8m)C&|KBe}p+IPD=ZhMY^KEZAy@!;;UJq%A_ zQDYvcER74Je>8;M{1sK4Z7K49$9-Jef)|OC!b2NFa=6Yt*)dZ@iohzPzkKaG1%Rbix*e|zErkQl9-Va-Pd;bNC7~K>k9E7lBT-BW z^EE+dbNuvT$}%s-B=Ql0u+PuKURva!__~y36r;uT!5!!IwfPkQ1E2^o*fNT&n(wS` zZXn~W|Fl$(6q4#i9Dqm9j~1EAzvCWg78w1!!T}#oLsO13mfFwuktzpq_&}xdy4C6j zOQo$31Nb(imW4&3rm=mzDm!fMH)_}iT$3PVC9JQP6-joNM9q2efN)QKcnfzm6ECv` z?Lc6S0$h6xiWXIWJJ%u=+oonv@Nb{k z;Ny{aoj;0u84B&1eVAc$T=0K#)pO87=&UA{pEiB6#cVckn%Y#%C zf!IWPV7ZW8)Ss0SJU0{vr?(`)&Z`-wa*@gcuKHAOj&2Zt9seJ=k55{;076_q;a7ub z7s_9G1-ENts#;k{v5j*ViaN-`E&fKLmdF)!T_lR7X+mqam5YpitB`owzGJ9@OMG}M zS|SLqTG#<*h?Wwh_>$2Y5T4zrp3WHuGdkoe?%~~p`6h!)-<(TOf>}_0j9N8)_hIPX zWw@3VtJ6XZKVw?MZT$-pYi*q}AE;2IB8<-LCeg-nNa~W|)!g_Ok-F6YrY;%(`sO8{|n*qJT&P;)7NuWZa3^)GaBdCm>LyjX6n-aBz^@sv|+=0hp z!?HLpS)T?QIrP@~V<3Cm<#+`8G~3yp0|n9EmVK&30a{rBmy5p&&SI9tON=`tpLIp7 z(@iDLmNp~+$c%M}TW*A-TSZ;3C-?TmyBQ3pa!`+h0p2f(R)8a1Qv%11F_FLt0MYd@ zm0;=^_qSGvZdUYQy@yp~CAUw-bEuMkQ)V-oJ!5u!m%<~@1-G3g7=WfG1;_W_!<-u` zeRjc|mE{wbRRW3RkO(tKhK@IhAk!^g2l4`~vl(AJQ)x)Jdo$&bHHYQCS;Twi3Jk)S zyl5e~I-fzwy^gazBHCUCS+OUfxe2_2J}4fsYr&l$j&|G6Q2)*Hb{tKQc^)%7WiIM^ zPH$E`Xu~#u(aRSH8;ibu^L#auu`|rQanm>yaTaNDI^w4qWfX1u;DfTF9H03WJVly- zOf*!AB9vL=q-s{~n-#6KO-S@ja%HV(=O5v3X$z!Ww<=(^9TWOtDfwjP5)QK zIz~GnjLFQ;ujn^M`moLxVC|bgP#l%+D`k5^B2nTe+7fi$Q)jk!siL+Qs>f&q2P&{; z1IHs|>>FKKrXGbYcE6PV!vtI-i2Xfhf@!73?~DLVF1a^BdC9(Zj#Y5sRYY~mID|h@ z1sH2ngjKY~8H)dvVMMfXArSGKY~0M+wwED6`tw=h8>|cpA@|V+_QK5^9?|omEO06I zt$S2xB7zuU^BEt%CM7^3iIif?^Wa1PbpXn9jHlV=!G0Vjb1DrOdbFl`cE4$3SCi*JK8)j{{W-J28qY_p=4N z$@mW0YU}~PX%Aer-wb{hwX}s5YQ-bt8W&WH=6feL;sbBkf+P8gSMjTZEMM!cD4y4OHYL*{XfYx5JrnqBUu%fB!Or@&9b$mF2h*HD)OEGy6T{Aa}NrYOv< zCMpU&1(AlV*&?gj7yI99=zy_YT^jg{W!>gZA#!o!)_c|+)qbihI}fsj61Gs>p|Hc5 zN$^xBjKzsvl^PhI2)QAz<^CIyRoj9KsNSOI#(Zko>J*y{lf&L0v&4v1d;H2j2BMmQ z@uff7tA~bEDi2o@gZ|F~%fH+;(K{1|w3%V0V5zm}g(c}cNqgR7?Rxt z?FdefZ_#`S_uH?xi%kgJkKw228Cu#U-LNkaMKh5@AEIz9aMF0X;VRE+^OOPEw5DP_ 
zT{IkKXl*UIT*tZBRMVbfrtx$(X{btK31dNm@{9=e*v9*@&KGDIh2IgeF^_a=>0`L` zr%Uhu9O01o=}P1Xa7PQBj3>u;%$Bg@a8bXy z?dJG;Em^Mhn6{%&(c70M-|^SO_X>S7JbLZa*TKOC*jM~F_FzcOI|^yFY&P3tMs|yR zj_!ZYbE9SNxv?_GW{F9Fb(`6kvan8FW117T$T##?aKF#M4;$P>u6E?9)+nD07$!<| zH-p(Kubeo-rDgX3fIY{B;?oX7h`9|Ni^Z2Qi#w=az`P1f9>CnH_Iwj$2`?jkWi~zo zH6fPPPnUs<&L}GP0Pa$O(lBaXek}3jmsH0G<`CX4C7>bLL^7GDt$7^}S8v3u@}}tJ z{Et|`mZiz=SZ0s8Xx~}<{!wBw@T7w?(>;@w&*V2ztXMHq`MmEyo+0(UOb8P^wr2f( z$ene*WG-TGNmQ5MVw~eFFHjLy{hv6`m|PK4dkH_me-RtY>_d^P z;Oy&l>)UAXStIICZuu4EzkJ~z16NMQYbWzLRxEc@9INuap2PclJqH@f#aK{>aV{5} zn-ycr5KnFDu2J_gkd6YIheOAqf37k}rkz&45;&jegz5ums?P&Fk?` z{^y_B2dYzNQB9E2|01Z>U554V5hTWWi?mxYra zro7dnG{aRzh%#)N#?8W`f zZfnVtlC*b#p1dC@ZIwB_aG*U}!PV&zGL&+_et>Qtj4xgz2QV?f6(QG|SkD~0tl&@h zc})<9C$A-a5sk_fsx@@9&G~d9A*oDeK)BKk{GW1)R++p4oiTLi zcjUJ!-Au6wfdK=ZHM}8Orch3XEBN882g>{2ftkVvP}+-AD5T$#4G?Rk0S7%3Exnwa z`&ln3hyF~>C|-jG@H_*3rN%*w6p@{>l>x2LO7qS$gz^x#oOy5)QG(F&L+_EY0iZNT zgBpD=b8fsQjB)o2HLTR!!gWscC>wA4n9&bhqPas4H(EK0M(Ji^#!h$%gd`LqIFP)n zG%7fAntWh~1-QcH1dM~r!Dm=PcXS;~GOmssJKe|O^&Nh}^@>$*|oR7O+%ur!FEOK0Q~^c zh*aC+0EnDtq3G(?*$(9ANv;0DgS9q!CeU=YYVE3fB>#>QC!u2W%E@NThwQpdE5jO zlmj%`DAQ8M0RLto+%8mYHvsW`|J_c6LZ*PMWFooI0Yj*whBYG^i9T z3;b*2@b4tnRQm^r*>U1}UjH+=cAKz0I^ZaIAN!dKLmu?UKfnHSrX_1KLj)K_aWQ*?;x24l2b~*Y@u+6y=*t zn|}qQm3t%FLsZH&G>t<=(wMDUtyMmp<_GKQ{XeJv`O-@Sp*i@|3h1IXIzWB?I9%GX z@X1VMvt7b`*+o-ihI&YH0_y&?N#V_C!B_-#;}r9%E~bKe5H7Q{%UNBeULP%K{U)cJ z(SH`RDv00S1^4sCr?OS&XEN6GY=Qzi_j>>h1{F0Zh!E6By~PC|IT`w&8f?S|oADW6Q<7&gog=b;(x_5Wj$K z>-_*^DW-l-=psuu3CK46tZDgX^hKNXz95H4Po95XAkTewcj+~{mT?qwfQOZX%;E#~ z`^bNrCB&}sBMP*n*DWToH}tn*GScVC_=$C-0D&)wI)(bCZ@#`W%1cfijil+Ic09fQ zwpQMgjf`WG6I||!Ib)VT?B521+)b%uC}Juep!u?@gzV{DnZ9v4EJd#?QhK=6XEbLT zHd-{J%GS`5xYRsUwz*Vhro-#2|0^^y02rPPhW@!IM_JrswGo}w)1`oM85E; zf<33dg?Vua{H<6`SE}Uk0Q1QCEp(i&ne#{$$A+uMd;&Jy;N$){TQdef&&(f>gvm1R zPjmyM1V4at@BW7chjJw^NwbJ`7I9Ec;`>V8$&uG-I-I9$;s{??L5!Vj+*iZok@N+& zgWK#o#~jX>vXzuT>IrurauAwd9WIXqqPE;6G&N%4d^z3bdk`%!Q#G`@9Q$0i> 
zXAX$}JH!@hsMTs&0vuS3zDnZ_-Uv+rmR(0EJPajpI4QhJx)ZsYBWq5t9@tlnJEly?Wt@ip`*8X?OLEw0)RU5c?5x zUr5Tz4~N}s{c)vljCY@IN3&dRV>p4ihJ0t@A@Sa#Ex%G=Gnf9^`BEQ-%CnQ5eVvV!i z&2+|?zW;&b=15NrBSz()e2qvwmda2*Q6kL*BGw7+*Mj`cZ<+^D7eKgEoJVk1*Pog# z7}uy7QSU+7N$#Vjuo}mWPS4c2>($-loaYc6%)Mdw#XrDq1LoVbrexw@MJxW=b1n;a zlD%T<_&rbNv-?IJLCl>6yh3M&bR%=w)W<3gOf<8I<^8JGia0iuOYyb+sqS+^vv_g8 zygy1(AKl(q34uiT>hXn^sV1-8=|ZV}Is`rjv*Hf6ApzK$LdD-O>5_t>5eQyp(KIy9 z+AoCJR1x8U+0&EQBQgIdaQbt$Q}sY%O_{Wtd@5}35DgalmBLXwhZgnEYFnu(%msy@ zi9sXn+1sE;y|i;;PiB0};TL5Gt+(mFti6C(q%$E?pv^z#enL@)Yx=k*7Y_~U3J?^F zWGJJ;lKp<_0)koNs~Qhe|0Rg2L@|9I4zE@RyBdONvv2EG(`3J}$SFnO7bbtoa107i zzB-`%1He+00I+dBebAa3-r7?k`tffc%IlpCrgY`2cK%b@YQ&r$U|MMX-Iw~Nm=5Jc zpNCa==SLeH=+RFxSt^M2Yi}D=3V~nD-3Lu^Uznfwpj1%bLK)|@(g?5~!C@Dv+3wSp zB>(;%;xWPeQ3FeLs|ZcyK!Y|Pi`zCf-8H~b_ysjrBVMOIWqJt_BsS*M z)=CFlvuTs{f8p%ph#b%sy7{ zD0A_fPquo3cW{wK4gUFobAC2d`(>i>s#`N9IJ-a|!)vSU0}K4R7vq;Tjzqef?}P{& z*Ws>gNn^`XwPY7U>&N9uWW+YU1>wVQ$q9d$bALmUCL;mHd$mqXm3R%C)4xBa0&FzR zS!?$auUwqDL&x;!gD1UqO9c5+Ea8A(Vvb_1@kucx(M}w;k=aoKxZ-|WR$O{vu`#a0 zLQMy6&xOKWKNPT~g-V&6h8gA#&yIkS5*2J(+2Uf zDc+CxSP1LpEa2*iykh_GXwPYtvBs$@{EM`@+!Rc_c(EP*$9uBmlMe?^AnyEO5$D)& zc2Rw4On>pcyRqS2u^|ctzVM%iFV_TS=na5MSDLZPxmsKmq@CmlWO9Q(S2tDo?ruJj zE7zgdI88sbsLspgU5p4QZW+z-Q}3j;?!EYO#awtrNZdRlY7sFGw$~+4_T+Aw>%Z9v zu;w^fEPHCIDl^CiMg2=%`o5z?0s@el3Qq9KA}WF8?~v8?s?cY?Ed$<3#=^w`Yn*rQCmk+ z22|nm7O#0(ipi?TC1!tSk z=(wxr=?Ir7!j%vqW+QEiVbuAxMOJMVx7LeEYH8~ZlQ)ca)ZMMKLb}CqFkX&Ck1n7! 
ze~jH#x3d^9lyi4iNjY}0_XEYy_S2(y%z`m{*dpjoSHli)u8~Oa$%J9&ITA;BE&jBW?^8{>Rc>8;R7}(bJOP{Cu}~!rxDhC26$i~@ea$8 zLmV?spyQSPauPp#Owxi&I-pvR+ln2>qxyOUb(e?o5;|xeHh$*|N8W;xx%~lCW_=e; z+PtZNu!@eQvTaV9aVQXdHU76kxmeH#@HEgI8lXQ^?YQTB3oOv#RStXMJvK)2?%n5L z6F>e}WDzo~f%Mgz5EN1fi6D|7nLX&OQ!m_R%eA~TaUDY-oc$|l7f&e9Ocd=28}}^Q zPX`DQ4eoxb=IzZUyvlv^9jg~Lb;Ya>Mt%76&m~I`_@1*0a zeYaTeS{q1Z@IN=lTWG}ELctqZU;DtQaMp!a75J*3r%M1|Lq(5kB=PCu!$=jr=)}AM z#IbnQ2169gC?)XXb00~J!+}}sQ8?NxO({Vqqg}8yOmb7 zir5TYHk@!$5j}TXy-eo`)`j?hJi6j_7=lNw#h;WS z#fTvY&USSNuo9iJS_pZDN%$#{c&tnR>oe=Ph7+;m6l%N5GI^Dkl2$j> zl>WDG-bu1ge>iN(!%_Qzg2&^|su1q}$HG6*hlDXRYt} z+zW_EnnZ#Q%jU?{&x<(T*7o*M6N^eGK7H*8CggH<>ELIUC`??1UD9Z-qi?}BuczCD zI|O_8n={MD4tBINAP=Zu)E_e7K{$yB$Ji~>#HM@}Plaqs zOtIiZm%1S1^q!t0YBJktYd&5lIccqmn46P^*ARaZBm7~)q{FKNSK zI)P}0{_od(yv$ML8oIvu#%PU6ER^sdJM?347u%!#6kPbtLRTU{FI=H>&!e$0d_n$e$LIh-#lWYm zXIP+_wxYc0dnO4RSY(PJDKW_rKcMr2#y-MJPT+kJnA)>zG_&8^SD6VU4tf|j;?>YH zLUUCBO7G3@S9+2Zq8;?%KwE?Q7CtPs8-JA6$ysau7rfWx2e>%0PehkM3x8BV@nl8Y z>|%nDC0rCXx<^(9MdNg_e7fXV#KdkWy1(1O&%d(t6J&t@zMc&^806}8)n(a2s+fk6 zqZ3z_le+N5*#Jau{YEw@HwXVw*avV5jAl_hT`_9lrBI=74Xyr(m3^ihGNwVpdFe*a zPd+JV>9XX0llNrp69sh?7;%;wM;@PaU+LP;=fIEpm`ZRYgT#JZx{N^pHNUkODfsr? z6lKFDEZ-6z!Ak0AT=q?6)U%PFjZjuh&_|p6es4Ak8FkoMNp$ODZ;JcNUX(Av<@TZc1;5BG@Q%NdHWu*nZ z36DqX3p$HQ)C&A6kjQ5x=L7#tLgibq&o`^iS6QG=jmc54W($U>CPBnM$-7*WH0rV^ z3Il1=ejCStfa+=zcdb0OO1v!6@?8eazyd9aZKnS_5ORiF1K-AuT)bcY(Bn^SfrWv` ze0_vX%WS?X_Z53m_F$*0`a?bTgP0+96uK@~Kd`p)*NT2h0J$J&WD0>JJMm}{eTn?L zzm4}QpC<;YHhC$&;*>)?vuQFA!!UT?7d@{RlPM|aj@arHW4BiKr=d}tpHHvK8vusK zD)_q+Or?=YjUweIm4YzWU&jmfCj1%AP;jr3pmRl^e?D7kzv!Y4pyEK>3PibE3Vux@ zYxji)_?I-I(0Cj6x$ImF6~heqRjd=?SF!Kd>j=*-J<$>LI|&r@9jlCJsCKHTI+hHv z)jkB)%me9OSPVfl986iP>m}M@V@>HzRouR^x{(o_(gfg`XD=CoupyL=(-G+5nULh; z4@vdTh2ZLR!FzFp^yIa{`~*Uo@M72%fy1j#=;Eu+A3}{F2)k2-3-b9opI1#U;v0S& zy7GTMLj@N{S%OE2rff}F)FG6%WC|ItKt^6&>Rmr$_cc)!o?=Vn!qrJ$;9pR(D_yt@ zxrc?qu9-uyckAwc4RlA^If-7ov;R=u!PICo&8~!UFiZ=o*WsG+DM#e$%yaZDhshNLNvFXmpZMD5L2<;YbR#_G5>IxJj2-Pc|!Ls8J! 
zNj!VZ_6d~90%{mnyJ+u(d-wvLt@;95qjnYO7q1{MMZG*)I$5J9^b3@|lt!|Gd+l^F zx6`_gS&4v>{&MbTcJZqD=}z~`%cGBjFGybsNFeDR04A-xeI~K(fgb~_NE;z~$U0`V z*4vMa!Nu38`{C?on+~Wi)-ozeM{ia|J1YsQEIZZreTK!ssvas#=ZDYiPunKR;KR>tCc;^0`-IIgv+ znaO-q;XvtCp6F_;Ef)d|gc^@RLl4;2Ec;}{beygojOQ<;!0HAtF7W+a8P6^I{Q`skDBC#L)5bxip z_~mNSROf`VF&@w9S}Wv_lDl}U+7mAJ79X3xkmk5fksg(Hm4yhlwO&Bt!@_0yuY++k z|JVVfB2Bp^)z_#k({Y{U0R;N!p`82O`V{bju)boA-m^vqtOq08sY>|%zML={IH>X) zrA9d)XI!NF%jSI5FW=pd{v~ZxVz285wzfG3quK2qC!fK7+^A{tYRfbsb9ely)rvg1 z$o2OzSgkJe2G2#x+j9Hu738H8YcmphaQ*Un#1e(silN;Ye|@}K%I*9d*qX#5GRE`M zz@jNyoT_@-2lU++zH^t=5AkKF{8W^!={2EX0%V8#kn|4AWQs z2e(f?h=7C{^+w-G0HJG$ILKA8oYPDZb?;tsVStHqScO@(&cgsG1-QB{?-n6>11;k> z?^ah{x+3PER%+}MsOE$}O)7*>5jRxWu~7Lx7t0rDhg!JAK)TaYH~5lx%LTj;2dveg}_k0C4>_`G3LQ(J8hESC_eK zmhn9D5KZ=%?a8jERavqJ{kqM^jV8cIW65T&(DVd5~&9mTGs)`GvVaK25Rwg^S! z?en!j*qyvhc(IQ40b;L(o}>fPx+gH_H)Ebxs}~&cU3ZcQCd7qL;c zuX0N}jP?9AxXk5^u)O$Eu>%I%{yV%mqu31Rs;Y^z0-GH72ePKXeFB@2WYZHa{XyY9 z5Vc3u&l{78b`jth7Z>&M=~^))zzxCaxQ267*=-B)+FJuTaIj+l>;GUWi;S6a7dj$K z()B#xWVZ{`leQWZzAai~q^N)Q9hj1!K8g-p=0JGS##rWqvH3^b*;KY7E*^J;9a)o+ z?gXqhfZG|WoHuLd0i$1O+g1)#Zt2%|x519N6e7(P8RhzfI~v%}RCGKf%9kAticBI$ z73Z@$clHG3e)DZRgR5Udekeufn_HL>hs^B@l;B|Sc`AGUwQ&#*cz6MN^JmQ1VfEhB zsLt5S{;2{4V>t9Jn$K0E)*uPL#ifd^P4AB(UfDrws!3?cl)nQ%$Kf;ER^9!|NWZcY zzrIkMg2L4OW~{ML5HLtA=dhk8Fz~H^RY)LjWgaM*XjnKmQliNAG}d};*Dp6DX8C+R z7oK1;&aOjh(iq?2-3NEj= ztsIip2tbiw{k_wXquqDle|2fH?*rdz@M-Ng&DLnQ<7DJb&?@Q06f3b@U-Y*uK`y)sIzRm5F&XE-R(C$&aDlEL%|Aa`mKXX^8r zT?eAO0ig+dORQOY!}E13SQoK=yzVAuu=cD4u61&I_tydfaeht=1vcT?Jsbf#o^|PK z*H`c@;Atheh*G`e;8?6qxXg93n&HPLX`1Z%j2rc*(Y-ILMrSTZDT;dfOxSVsKCa%D znWJHK9WmC3obU4xlUH(_Smn(E&+g)YffdiuQ$0y4#2zr=aSrnR!jU=g+x}Q$J zL+F1>1i)ohwua=%*&p-{v|kWORePa!OqnHXOnhx9?!62&FcqN)yJmq~2l{;aVN;SM zqIGuau9Kxm!b9!n`h5Ieyuy7Y#4tc4}m+1e$u2q=5 zkoL5oKhsC(Bf)~rR05e^VeKO+x|)Hj<{{Iym=(JI1x%0PH;Ulm_w_lJe_AGA2D5sF zkZ@`NlC0_Hw{lhFrz&+bU?$uH?-W)gQM9BnguWBKQXb#d=B3_|%%|V!l_xF$t!2X^ev%;lP^T+5}iv$kSRe6KAem~Dnwn1_q 
zLR;9Tt~nG_ltA`p8e8~$Mx92J<^DBI!4+c)l{nBue7)xw2%X#znM|VdWr zG^z;v9cM>;9Q$(p+hE&7QB2u2`2jpf9fYvK!^r_4*6aTdQEq$h8%fEFS_Amn+3$XT zU1UkhMZc1)RW>1bYP9)se+ zyw@n3%w7O4ffmN7$RNJCA-kVy*N3d#R?ggA47N71bHn5*{oa%_OP%vywO7HV0xW5- z3)Nf*hdUf3O&dyKl%TU9T?0Ka(AlU<)7gET1M{6|A4@5HQmckx2w#Ij7KWVIlklXS zHW5GKFkN!ZQ1sPfh)2q+H;hyS%tVwphh};|`^|dE5Q2WUG9XPxJM)^BF!h*S`t^@> zZrYbFjFIt2c?uQsDG_Nw*C^B!39~W9CsFyJ4!=k>@hPTeYh-k*gJFEwzjTl%6h|W_ z4Ey5sa17(`0z+qoC@yyu&{EFp#B6`XGX021>h5Pz$*d7tR~>Hqs|W&;Fb#vu;4(5> z|3@QUXK_SbW?1gyNB`<5Ht;~#6p&PdKY3VC-Fg8)dC%t?9NYJ*bYZgH%DZ?TPaE8A zmZt8s36OxX|Be{bHh<-)#FF`@m)qAP-)0q-!6U2F_xIR&ezu|ty(0EZQ*SAB!4nfQ zGgTM7aP4BB$bK8eSTPx0pkCX@nxU8MhD6XI!e&>>@QDnd$cw9=BVsJNw^H?e z`!!|)6^_G~H%yyi@K3OL&WUeD6t0ANzT%G5LKVurt>~X3C8X&E&SfTRnWQK`ruabv zU7x{60-WJp%xrt@%;@Axzs2xRgJv2@h3`EsdnyZ)@?Uv{a%@5qv9^-Hw`K5NM{i6< zBQ*s1D=sn*>*D6vI`VZrDF9y~9PMY#JuTGKs7Am?yMny;at0W6hB?Y>Fu+ZbrC@LnMJR77)M=-n5wrYzZm1iwJ`>cE> z;T7E~WCSq_L*c&)s_-i=M_WO|v+!|dn(#@jVAZ|}_LQFjr=FV>8YF)gMni0H>qDH| zW7eL)Wcqi68C46@m5+O6{*~R=@6=5bnug>s7>>B-^Z$Dg z#-m1cDSdZv3{<^RGP#GyWCGj?*a%3q($WwbjAk6EV8jx#x9;x#;4Ue$6oNJw#6l8< zo=;h*8)#*l7F93l&nAtgaKIk3E9A3@&-(TAude?)(I5b@^8&7$TNTK*?)#!f0k841 zVFrZpy4EKF%fcbVY-6Km+1}}`scttbjzwBM09q- z?jR~9hknN>A^m;p>QaXVYBepGc_SzHLJE`mlYMjKFw)EjU=Ay&Cak4&WFcd49$r7Q z9tach3Oe9wlZf`^bsHe1D76pr}H#(#xCdrPjPA@KLy z$cF;<`TIR+YoJPXsVc+xWwUViB-x1y=jqXAFGQg#m5c|AH!q=-B!Qk`WNzi>?y65# zZEXN!%XZd){xbcm&1&p(Gw%>i{3hm6(H_>%K6g(pQOh;4@!@k|3dfyJueqUpX?>J9 ze^jsyDgc8}6c!01Oc#a64FJ3aCw&BcIf?&rVjv=m7o}oA{AIy-Y=LBS`_-Q1y4>q-u(a-k-~EL(1+3 zOX~|2ar-I~lC^)sG0FD|=tIcDWY3yK4|)p$W6-)6N;KFG%32lxHk6MMllMt)%cevS zeLaV&EUVX(UrjTj6n(J+I~%Z}DYlFit{jm+a1>5Kb?`MOE>+@|g8abTkjsV+d7Ia= zvx71(TsY{c{8|`p;F-zC5UE3|Q4>z%kcptg6;*z-*lu8J1n(}3ZMReVK=_S|C0&Uj z5r<1}!-W8l^A?TDAMZ~$1)VIpOf=!R-a5_fXH|5b&Q<^ipSQHCFTI5d91lnSkWKug7KenSQ%=W z<;BSqNsg(}+-yDmL@;#VU#!9Dz6WczK(DRG`|Sf*_wKIW4XhzyT8X;Ex8w4P#3>`- z4n_>uSk>{q;6lHm z0EWDQ(-aa1q^!~1{c8%*c*DqF+k+Hb4j?P9op<8Beq-xBM|f$WF(|%3CS^2%Pd73X 
zLOvLhjeDQnW6^{nCh9I|=pGG~hW!lhDeCb#^#ON#0-u4A_Rp)AQzD}d4%Aq3o zda<8#Ga^5(ztU$JW+?nsC8kOojFeV($;{ZX@(@cCAX)WJ+I85dA1!G~WV{B@u(b6Z z?}fMGOqqZY;(CvUK*3c4rAI14Nm z8yd@xYBAD-qR=#S5fzzOzVb^=)SZth5f3)eXLs;-j(_5bPwAMu-6M{c96^C1hRAz) z`(A>BObWTnyL-kbrn=33scT*ryG+^@jqUIXueY*VTv;>>Drtvs(%LP7NrU^pL&|}_ z@OJ+El1Te_1Jqb#S+x7qmXvbQK4QVcUIL%wf@(LsTdueFfpHw1x>P@wCC52v&d$wB z7r|YX+7q`UN9{zL`pQUG_25&R|H){2NIAPij8Sw#EK7zwK8oQi`S8EoL4&P_z836N zbVjhR2i;x-E}#TmGFcYJ((EqJ@=Ga(t|`+iFpw$cBC4~zIcb8ffTb-cavK6EYS$~> zk$JoL5q?wTUu5@*m37W#=3@*_H-I_8b*xnQrlW269ZDKM!=&Tb3 z8K5r*CynM#RS6oL>El(*t*fY(f9(OJO&eoP;%?d3YIkWRzyX%kq54|25&A-zCXR=> z((F^KG&b~5xU%w?2d`9|41gGVkSvjtXRMFy?q8UXR@(|EmnLdDK9>{M8u0QntTszw&Xw_EtBYDMuusHsOM%Zjx0?=;bM{)_W0##{qTW}L9{?S7S1df;{83>$ zQ@srXl!k?q;LGF-;zn}nXg>;-^32c-gXtS@ocA6+%S@s#DZdSSy@ z^mJOGf*9$nePvg3{38;|-hm9L{3X%#2*0yCJXW)d1>Ja&VX+^>eUx7Q8fCVzm!x}V z2mTnc?Wp+ z=9{qMo9?Qi(AWXZ`0G#s(uc#pXMAqgsl3aJA?O0`(D=6XiwcSiu9d_D+V^qQ5`Ip; z&zwH86i1^oSNYEJtCmb1iAiBhkMNIP3eT7yCi&$gs0h92c+Vzs!477Y_}?uZR+6>D z8u59fDyKDt{f*G2;m!o|tP1INAtWJU_v#A6nP>RDh!oXR91z<{zP z5oinOjLc!K3mkv!*v@<^7j`w5FDn%>vd7mjcl|OgexZp75z>xn;;~|$dSBGMh4?%t zJ(O)ZResn4hBO4RqFAqqds{$k?uyvcpS0nxYr4D(R*b>hc;UDw05A~Rpz3xcqAw_` zYQw~A?=(on0D=t*FIQ=6;L@leEMJ~KWhmmt1@%Z={5ZW4HN}X=dN7$E=X{*H`?JIx zRr=TG*X+;zbdg$xrqrDo)$Vq_XFd+Bs?P5R5Bmeg4TE9Y{+h)YmBIVp-XFn-haY<; zgNC;+8G-AW4)wof&FX9jzg%PQ0@0=V(q8k<#V<75nbC0!dL!i>j;3Rxh<|@FBT(9J zxj6FUkB{C|uuACyB<2CM5nOsqKct!N-2rfHw)<^EVV}jZmii9PMPKxe)=oaoP1i<# zzkhfy9|H2imdle{9oUidj#omx!}vpBY|5TMap<@14yU`RXF7N*SyM)jmmAa##Tg-H zbFM-X7`fCCph-6}9$-}$*N|17;0A+F5%V{kx9LOjQrEkTFBE;riCl^gP9Cxqb#Dkl zn{tXwEp=*)!e86#!(Ep5;Hg-qmzN1q)^k)3_1%uSjx`^*S@kSR8#{687;g9dB1}`@c3f-Vgv}GVe6w98l$c(m66hP19!!15{a7 z`@CJVz!;_f&~(>RPk_|8+Y*!%K^Hx^y`idQPlZVCJfrOs9NZ&cRrn;@{B{wYT>U_7 zOilFR@^0+ro)kR5Z`$(?8LGztE>{Q;)KMG0RJPL@{?GRPu>_qi9@C7&eIHo;?(MJ* zu8VTDJW+tE17M}1uhOo1y)qPOHnDxSMCl^7a6fm~^N0AcGqbm`fcpW`ev5q%|2hIJ z4AXnOrEykIN2*MuRAYW?%as7WfAyT`T(~{M%HB`Ss}J_RKdk^Dv3*rv3Xgt_xkHnB 
zOj!H|@2hZvMmRpd6C6}pl4B(PZQ|v-uFsudfRZ)WmhSC4l1|WcPre>Kyh@r`7LkMU zgGmkYV-_~8AZidq09M$1-|yvsRfrXvI7RLr>4~ViGHLA!!$jPHdKr_xBgQs$T#eo* znPIY0>SNc#YNypmx!j(0L$3AAXi3-Hk$w6rlk{(D$nEhQ%TAR`>xOf2N*n^2Ung7G z-?y01R9H0yOy2O`Uo;p(f=qq$Y*5E{3mhV>>OYZQ<=I&DHfmM?v>F0zd*jw>4x5=* zpn1M0*dEGIGl?|ll3#Yeroq&sv`bKIHpjAQSaY!w8*XcJ{}73DF}F%Ni&tIG6bs}0 zR}7wfIdzT};JrgVqyL!`u+|;Bxa+6B_xc>|Di+La*Cm=F|AshhWqyHE?3?w-Ai>q4 zGRUn-rlRzf>v;w6k5O1f4vy($AlS={8zi_8U$*;{u#yhGG47-2f`H?gBKIc)moa2| z9iinOC13kL?!&q9N2fJ&U0z_OFta6oQ-ac z(Y8eUZ@B2+D_j2?vBDcV_WUhY$V6^K;$0Pqk8biJGpO|%K=@>W4Z#wd(eMqfSQ4&h zPhWLMDw*4EOG>0jkFLO##9jM;c-KT!GMM<}BvR65)9g?Fx7dxEE$o@b-cm%>f(V3| zwTDXP1ASBxqqQ6aPvTFnTD!nY7u0}^g_J?u4$pbcdEp3jZgAnmB@GK$z2E1j6g|A$Fo;Gwu2;31liY z+6!S5D>4w}>`pJ}wlDCGrZzn2?lm@KTuxvUx6+qt1){Rk zJUo=wZzSCY83t)bU9^7p$f5?N<`{F%mx(rC0K9T`9GP*+X@sZ2&*>`mpC2a!(+GMX zLUtQpsK3WsK#4^%la)7@gs&spjbk8=W5ed4KE$xO_*`bl{H?Li(VF;NFC1uPUi%7> z03f^!!BrnmiC>_12!Nc8MI`yLo6?RpA(y)n9EJq<(U;4$gU#Q;6F*sw95IsB>vfC4 zU5)Fas8;^KL2^;q24RhnyesJ254CGLP3{a%WC08#`wI8|mi=ew_rag2{WhNBhFO){a%9thC91!hGWUmie?U;P`dWcCa=q32>JZoYawOI z3);TzH4rvv!FUm>4UV&?+yQpyw=F%y7p%|s=$s{^D8bX~3gi(9o?&q=E%PQkyt@=& z2^_5(@z4-Jj_m7;Jxdv!QreVUGi*8!qu@}7s5m|M*S85Zby#&iXXC@%tfenqh(k>S zy3iJ)oT@EXeMzqCKP}Zi*(Muet!syKc=BybfH;NqpG)2Es{PG!4N!pxFqtEj%LIF) zB=Fkc6)2g3bxAiXh1Y}CKbs>|dJ;i-61j^R$zjdc5nf*QsHZcztjv!IN(vn&xHJ@D z(ANUGP3rkgVU>V%W+wGMBuBRBc+{C~4QcGE(Gx-MW12m}xG^n*Ei{J@8Tn8nG4<`O zR}T)9*F#2u(hN_oSVLSBR5Q9H=OC-v6afZuE;iOm0zW-=mf^;;xVe=(0{U%HvQx!; zESTy7fGtGlU1VC>lHo;iZJ*!0d}c`}2hNeqc~aTwOUvtz zd|^pSPt^V!j52LALx6sz zVyO`mTL@`!H)M16Pjay+&ERORGKM_;^u;t$wcuCBCI{mqh(|4mkq&Eu5J@$$qt`NXF=kRl_kt!cXJRIlBYFfGP61RLadtcEJYZ{2 zNj#QG=isn|SuOri7D7TULD`F5P-nA1rwqz=_ zai7$e>VDqamGP$iA@>mg4}=t%m`d5k&L44)>jl$SS-Mt{K%wQ6+lK=RK$o{%|Xc} zJtOmTaQkRzM2(Yy?>NN1@aw%xYLX7zNQ3-AvNU)nDYVG#QvW-hevcXF%;}A!%Fv?s z@#wngfJD*ehG3C{e(8-%+zXErOE_?{iE^zIzP+=_^Sae&{CN#kDkL~&A(8t^O=ba{ z937f!@S4sX@Judb5{zroOQA;GzH+E`W0XA0UhH+O(q^M8mJWQ=9-@mUm;uy$W%KSY 
zC`5u|ex>uMz(_84CIdVEd_IFNGK?*Rfr>+$kp)#&DF0-vt4F&X9Y?NRC|19TtBr*H z7WqDgr*!MGulXc!X50y)sfaARBkoYzPF9B48r|ocCY;;Dbj7D9f_jFRB}?e3;qj$s z1;i#=L@CRpgi@l?7non03dSFF@f)#M5VbLm)}XG4Nl*MLIeRCup5>?@FoVR8v}MbJ zyQbZJZfZTUz$)gV@4RH#`UWmdgxxaDKu4d>kHMjPaN2J2-S3l%Q$`9Ysw6EnDW@Mo zU7hIT38#S9D}C|ts*Rq50cG*Ji@pdGn>YP5!MQ-A4RA`+Cq-Rv>l28vv6nBk=B!RtYg zn6>BKPOd9@?R2b$aUEuqRtv`lUEXt8$D zPSqN`gHOT}3_-{(MH_*}66&B4+Q%+i+L*5v-8|9ppy-B-21(whrX;%E_Sz^b-)Fsp zzc~-a$?E(-Kpt8K>m~VtnF@Z^jguI9;J&%7r72GLUNNm2odTgBY7ZF~3X!ziXV+DJ zv50S@0>SB;6YQe-`tr3U89j6L;dW}L2PxdKPzcrPcgo5%=xSHzle>LS>ELl=X-VeX zZSK8k0&VBeH$g!Z+D2hZX0y2 zg?=8HhKAQ+uFQW;ig9iIW@A6*a%fO~I}#wtCPj2D=gXu~C4G?*n~kZ3J4Ta!I8V2* z9^bavRy>S97P<@Fc7U6qBa<=dn12+x#bRt`VariSI{rgR?#UKi17%;uYcD$RC4aYj z(twPIlo`+QH{2F8q;-)E#laXeR_!ng4~mY}z#@4&Ou*%j5y6sgy&iWQZBHUX==V|q+m~=p=?`%%LTZram>OwjmVI1dGJ8-`;Y$%+0>`-RR1^l*++>4POeOW_ z>f8h}oEfyzEK1HI1o~?lK{fd5FJhamVg~yr7$xaRJf{!0Q@n9KnBKaP$ zvNd^9^?`jNmBcif0YRDE|uuH@u6ASM^>7%cev|5(U=!JWGI)q*FMmCOI7qsIFr zPs@%g3D!5Oof}dbDF&fIRom8m;Lt1E)CNQJ)>@2tDI8`qM*xzF;v0aFThD& zG+IHp*G^+6UumEX@37LN1B;rPf#buAGQ{+dJ-XX>C#bvkSIN{D)nlk?S@#hE`lmu` zsu968MO^e^KH}S&&|~C2j6_@pXvXhf9`ox~qKp)iLOg5gb?{G_ zsSo0_N4`)kUCT`BP^yHYe_y2KA`TO2K`H-$bFnfMA+u5QHoGU8Qh%`&PgmMx^<5gd zPxEEgTT~bTpoFtnBwPasIYVL5a^rv2uz?F=Tq4EZ*FiqD^*BW1vlxVKC_%tE*pMg* zZe89ZukDP!{-sQ-A~1Jt8Fh@0&TF{-a|hKPU?gMF6&QJKZb|L1ck(WEhN0W%4>)p8 zdJ)seP4p7o=DfVlml`4)$aq7hX(e{oXG~(nU-q%!Hu=A2f^RF9?WCD7$NK$Xj75nq zL)G5=>+FiAYXlzc3>a<)@J&_iAguD6>TJP0cG|e0iV@{l&Db&;k!pRA$b>XFUex~h zejE-oucF;oZj$KKVGYhT$y6*7Ar~jFBc;I$*zvzBQbP>D2BO~bZYrxPRcF%cxOQ#s z1q2X18z~d_U2aFq^0c0bqkH&E5Gf;m9vRWHT>|b@c9ZbZ7@tE7eYZ%fsFNRPEQqOc z&HFJ5HEYrV;ygGZZa=S(|M6Z@7~b+v^*vQAOS7~a`p5lXAY4HVzuRwj5+bf*Zo3SL zrTCgL%>?;)nVOj7CZvvP5ArG2k@OcL9(K3Mfg$@>viL3GmH%&GIiSSDaO!g_THBsw zAs+qN<$!_P+``?)Mx2ltz%9g9IXZ{R5P8^VA>JmzqGp5Jr!wNWRxXA$o)7ckOK)~! 
zx8F=T8$ln99Z|Nz2eoG1VNi2F8}S6h4ka^zQruTJ?dOc8su@aIH!JM|IZV@l8~P~M zT?Em=d(F6%C}udxAHTK0L1z-on-nBW=-fML!_zy$I>Ys1VS>O67iDKk%shg7U7H~qE|bZ ztpFa^^HxS{vE`i7h6;AC6%h;OQdNJV(lM(PktFbf6NET@eYP|y!;EmY``k%5!WvrX zc>u5Uvd~mkGcZf_6sbxWZ5 z23UJ%vHu;(J~v%2_R4${mt@W4Ukwym+ZKltuISJ{yU+B zxtShORhSkp{>GW!&f{JpeLGGci%}Q__#iAeUEmQof2ZU=_0w%?TAxK=vln++pr+^UwxD3%U1w;J z9dDCW*kMy&x!lZEVvw7V4t+VTXz7;rhLB+CwYyOiLeCI& zx{($G!iWztC1Htkr@|Yb#Z;Q7t!QtV$F>w2|Dm^2NH@8LycCPM^VY5gO&~2PTQ)@w zLyz*uOjR{XzSo*)yswdN&PYj)t~M2f3zpEuGLY3WD3T??^O(9@YDmDj$Ca|dcVSwU zvmkGnpC^3~(#Z?}DmsypZ}s9fI=S3l+xinaq45{3%Bwb~M<eBpVyg z9JhC(OYYb}A|MrzyQeN_SlJ9@XP7kzdg42k3*nuvNAYH!9bY2(+BwOJDJ(8!uWo)! z%U}5RQ9LYZM>-h`nNr9@txE;Mq&_NLfjo$4?xyXV-?orM@UdO0TAJZ~X?kbMveV1C zJ-reW;>!hAckMKFCKiNfD>tJqyl_+~Y_&v|EultJG`W z1()Sz7)!l`y;i2ny*o@cYIBCJ<$TSBDjjE@6dwTteFFlkLSO29nbEz4r&#}Uhr*VD z;Qg+@&@HC{{2;)(_TA%dNI=BqN*1EC)+Kc(j|6EkbgVptJ|V%9;E3{p*M*?ss>Gxx zp#ziXQ%~&Uj66M|OPiU4{9)6@W*HKbe9AzE=yrtIi=Tt3^r80z8faWFVw!jgjQR+e zCk>)l3VF=-yJ?sYP%|us!WOr|H4dHg{+rZ{IDPCEnN?+2OQ-5+cC95&ihEu$Jv3gu zd}522+01ufDeJUr%syw>;zK@Z^c6~x?Iru2tO&N@3nsK0B6lG#+{W!Ay+&rFDne_) zC6O+l`Wljz=XPKq1*@D@^mhRfh50q#+-B21k&N3XcS zRX$WiwSK!qsEz(EPFoCB^~iqcgPaq`GU1!we2iZ9r~t1F;``hr^uhO;ZYRrm6cp;x zmNg9bVGij}AkUi<4WwUZ!O5{u+i}CNE*|`5%=p?=wxAAZoh8G}Udw5w3jz zba@74V2>WBuwGX{$q*=&IFDuOD>aA#f$5e`pSaA#(l?GMnY+FQck&guE7(3j34f5sn)y;k9AH341*w`v}5;|O>6 z+g7^ao0Yqq=Eg|6;IK%T+=8U}rU5@0g*W^g=IIESh7T{31RAU;Qq~)3VWhB!As>YP@D@eoD-^DUOm~!V|)2=Fxs^NKDLy5Xg&nL zS;KNfR}j(jfkFCGB$~Qg^P#HBMZsw`S?yfKt?f9@LTu$_*(gZ3geymt_!#~(E5y2} zN3l{JN>q!uXHu?6ty!Fa9`*Qu7=4NLxIepp=rq?Y+5U9?Sw8_4q1bT17U6wcF8h6Q zHf{#8B6MX(nC=|QdVCOD<`xFPL9VC=r3b1`woI zR)dm=3a+Vz_Mv<{(bfM}Wh}*pUvWY7!?P)>pf>$0)mT@IRaQH5rB2rd^41c!l`Wbp8N7 zeWtZoV`IhCSU(l!8W)UZpjKfUn&4p6{@EgK+)4cGxiu)npeJTAM$Hr;lhDzb&N_`+ zLp%2kcp#LT3t}NQ4W%*@bn}iN4#$(g5n4~cRnT;2jU-TmnCQ~xbf%x3IfHk-&3rf= zCf`ARFd@?n&IC*~oQ^mD;zri|i1l-GxVvrM?tQu0L)CRFF~Q1_1kU9B?a7XAp>-NK zXOseN-gT}6*HR{!BbSxp!6$c7S)6r}{_vYu%O>%oYycQ-S`j~48YWV)@-pi*7PGT0 
z*FN8M@4ijKQ-0#uHbPsh4BBiwFR!?c1PeNLM<+%>v}n5K0B4sf&ot=R{Zp(!Ghh=% z)>_F+$@9L+PaX)miBPjmLFENv!&Mt1#%hC9{{obRkYxO*=O#Nu9e~E%YW4jyZYD zB!?^#TK61Ld)qT2$a?1tH~7^|$G$3tOe*xlqtH!k+jMvss%V^6K^?h4N85PtZKPp&d&5`85do6gTvp=le8Mu*L&vB2aA-5O zX*yz2LcV;ii{k`#;7x;p0@F#B_5f1Oy#G-!UdGa@Q}%y)?_Nx6V$Hgh;;dIOb3I-* zW;4vmslbSyC3)@93TwYOJgrXkSFJsYM3w`!#pC)pbt$kO!-}PSw{aJ44It+?1p%RY zzzvH1NRle>|3Zfpy6l@kN|oa2iEtFSvL|#a=NJ4K8%?6o2*(Vg94Q8SKF7Y)Urge| zI#^jp4M9$L@NY_R4U-+R5NI#1C%KFJKp>Ax!Z0V?SbL>lUg8HT^rFv^hAG%q9qZZ5 zLcgT)4~H1?560Di*8j|-GY~{xTIh za)mG|;KuU2ujyykq9(OP{I&(q6rbsd0m9>oIIvFEEZl)!PdC;uWCdB`Vui$bs&b&V zt+|o-AY}i9)3fE&{5?nsNI>q+i}m<*d45!d=;qN6POHz;bG&3bV?i(zHy|?}yN^U%_5FL? zwyG1ib3uvFn6BNY8y^s6LGydbBGoFc31>hveb97dFN#K$rbY-vg{GyGzMg|22EKO$ zBRr_2ubgZA3)2#TXDlU^e%kO4rv)MaD7R9K@LBmIzIO%2I=Faw559_a?ewxD6j75L z*YWhMTy@LSguER#zBsBkhI~LXAV;lY&LSpA)EOM)?7$vWeO}?tN$DchAG1({8d2OC z`&)d_vLCN`At`{5=sS+?sg~5cJ|@pVCtzg<6a`qe{v^Vj=xVoa4PDBheTITNDwPxQ zl-Ved>reZ*Vh*`|Il)2djfUVfPh%o>fYEU=waBhgb+3@q_n0ACdZne(ZG(R zQmHI#d@YRYh?#qpzBN}W2MHT^^QYlNDO0Na=PG&kfHmhs7-M$Pzv~Exo)_G~s0^K) zV{rac?Tbh>jlB_ozm@6_}I9CizLuGaBuA7Ei!n~G>q z0yxA{JfJYJvieSDbJZpGq|x0+H(&d0O!bb4yDH%=OF4vT#fyc^Jv?UB48JAtAW+&c z&~OUpi}%=_{Ncb7n6J*=p4BW^xhPHP&>v;&79oKvwQk;YXVU0Nvsy&rIS0S-&&_211Q+UR zMG0nBCNmZH4QAnvAN(#5ilolykE1qQ|4GtwcPqB$kTvTKvQI8PWy6;JL2O<7e^ST1 z54yBWDPYmZ1jAyW?bnfM&RrhsnKY`-^+)~WyJ;2!6g||tCa)H!Xs?Um$O;&M6&<#I z_UEj<^W6>U?R}Kt1V%E^&$XENapu!&8vr{%#J|sbj|{ZW+2(^8hI{okv6FG$=P}Bg z%dYQP*Zce#;?G~_FD?YRFu+kzvJR|=#whm{CwW+^o=G=k4+=3#-jts=^O90MI z*9h*Pwf_G*mnJQ`WEEMADfh#VX*qJJzi*UT6lV{mQ#WLaPZ)h5qY!+kLnpfe2q|Of z(azrbmE^9S(-bw7-4pd)P)~EH%bRXSyrK7r1THf^1tFB}tMX@9h8`wh^0t1Wz#Zt)mS6Sd-hpd%v zno#DcDawq9>!rYhj<|w-q8%jD!t@frC)hLb~YmRoj#>5MV-j?G)(>Hv6N)x zH5-(VQ$D%-3&ur(hU>zf#47wz7;SddDYg*^_LDv)YfbQK!XCF`Hby3?s6DO=@~B@q zdgn_z)2#}ZBKVDeeJm7-|G{P|q1h$1yWKPl2X4G)?n-@`-Ti!uw|hoxA)>;TH?np` zj{!)GTCq_#44u`6(z4k?n^Ohs*y9l%_&Dcse)@YIV|-P*hM2c=B1x{vhVD&4*(!b42T>hK!wrb^5>zGc$5E>+s 
z{8R9`>7K&Sx5#nZqZj6~7OOi3CPXMY7doXps|_dYhcNSX6Nla##t`M7j^Pf+#uRpN z^S1ReaAZ@L|O-Z*tZ@a)>uN1CGfFFOUN1zcN2nOtul=4ur2=|VrGAptzO46 zF7n|JfqK`{@2aC>MHyW>L%MUeeUy2!Hrva}j~|Z(3va?EMvJ(Q7_96b5Xb_UWjNh6 zLm1Yt-oBGG8##$!Hto12StMrao!>y7Ws~+IRR|^ujW87b|xOboWx91@d#pA}&(+xLyo1(wxUZju_g~Bc>-^Xfm*1D^a+sWc5TaRpap0}dhCoa;KcRR8 z_yZ+%%-g5~g8Eym*;iz25O*)A$*=$iLgGk><8*-_rHSn3XM6+`D-Tx_omU*V(cyzP z6J;@B(0j-n&nY=Wal-A9PF6%h_h-)$vWYqt;k${Xf+?yQ)@;_SPDvWT z>&tz=yH|k3Lg}U#{3ir~_B-$D0xDFV3Tu`src8X`wzzT~v?D*kVX+F5=7bJ4B{&5K zG{44cEPm#`lab>LmnE7OWGh?%+r|rsqzk!VSu8&28{}~p;5lLy$qX=ixO9O{X3FM& za?E*Bf|N8&WR`3|SnswHeM{WZLZNRaV|RlkO=1WDSR+1X2-%$roicYlS}Ngpkao4| zjDI_(no8eg3Z34byC%F#$KPYdMUm_tC`0>3(4LD%Mb|pkbYcmGkV3w{Ph&ehu3253 z6!eRaaYB>v&gf;Va=Z&iB9^%=y@e`yWByrXA8M$=oJB!&EaM&PLL{9&#Q+w~*0A6G z%HWSu@`~9ds36#~g*t7>yKin7#>-roAgxstHJe*NtsjbViVLKt0$R(L9EO^IhsXt` z>2(nQUG90);4c;6mB)xbHCb3Fk}Z6Pr}9wCu7GQBe!QZ8#06Y_1QU+(wE+c(bAB88 zDOldi4T17{MKj24*I$?6VhVT8g908=dxNmm(>lF%ZC~@I5)sodBdohRuWLqP_Qo?M zw#5JOVzH{hXN+4a>$ws<`f_GHFyPsH^(4Hq&jK+MUmV8VGWz@w53ki>dtuoV`oS;Y zIv!dJJjd@MXB7d8H%w!h*up4>3=Ht39 zof((WpJV4n#XdUeP?fyN1zL#@t=?{Z}WYZfYNfF@+mY z%nZ^ex6*?-#L7#|viW_wRcLsGQ)Xvu9@aM^d~_9t{#em;TUbE`5w^^^$xtq7EH#4q=g+nh*%piqS4t}yIBAVS^h2dK4 zcp;f+F?+L*)yn*5q)YfAy)KOgbje<|7t;Ku$A`%Wm6jTSfkZ$wMF?^z`Uu~YUeX+q4N=iZEH({FwccfyeN+;mS=d6R_`M-=*HL6`eI9I@wlmLfi04u z0X~`6Mo|JI%95NsJY~ZfTxg3FHa39#+7y&>eWw_Nz&6yaz+Gs_&~rE)4wx+Cyesrg zCeOoq#VPSos24}+C&c?+7r<+4dB_~3xa+`ho-dZ0G$S%^+`c(J5 zxiaw8k)>n^6U-~M$LHpBq_}h??x^^R>+r1WBbT@{tm}JzWKQNfyCN~8*}?fccy@qy z^I+tpF9Q(h(75at|9pY$&j~?ombHCK#Vu9Dh2v8`+bUzLS^DBhO}`k$V0}k*((ET^ zLG}N9GkJ)?z9bu;S9^OXs9BaV`7&IE2m<~-5R3h*?v^F}p5XO#1$#^oA-%AXDE?GF zr$wS&l+Bvlv`)VnVry~vZgxvS7pwLkj&$j7e`OS^9*qO1K(+L&>)xKglx_EiZru&y zNY3C+15=jQG|~qCdK!c$v#?Y2h}z}bc^dtOxjHfkYU*o4ssdni5>o#ajoeQgt{$OQ z<$~Y*=DFBz#Tfey}VGjP{nbzjoP14pGMXwtS>dgy77QMJNmNxlLAmN zFr(2|RugO(3%m$AAV-Qla#e~FK(<<*7ZtlPz$Runc|o5B6)o>c3@zaLkAp>Hxqbk* zXkWdNT%`Gel|Eh{Z(v`VJ&NBDa3fjxaDwO)k0fgyGQr}O}Mo+XU)XCT~IUqLI 
zGK|d@Ra&a`|4$)o!P<*(0!1>{eH8?=|6-ZTC+#8lrgh`2Y36=%+K)5;;RrBNJ09STQIt5U0m%F)a55ZuEi>3db2fOT0T&I z?$ODA^9YS-xGR_2<-3Z1Uesfwq>bKjbIS zxy|LcXCWG@ikXmu>>bFFgiSlv54wB(6gSY)$P#uR5oce0q&YpQTkp}g3WRa{@V+YJ z4yHJ;wtM!L&#b1bfCPcwqwHipCpdm6X(;WSD-lwyLuPz64mtHMEIxS3`LaB>v~WSO z*t%UNE8s;|`KzMEj>VRTDN_LBASH)}df;FJ0H?-1jIIzcF-|YTrix(AvXs(WoU;GC z@dBot9~Y5IJDaA8II%_6L~#*njZ^>YTiUGV-_mPcWJJf)AUVCUqrTAVy`Dg3nDK(c zCR7|YMo3O4BhNAl9zgp9uLupSWvXwNol!oLfCm`&w9OH~^cQn_Si)c@!;70apka73 z|4bCx5L~_in zBu*w!1Zd3JFX{{|pQ8jXxX`elDDcMPrLg$=$`=)y=zr2t{$NliXvP_3 zlkUJF84T$+&IcmjRZ_pGUv3JgSsTwB4uc9|ptAgg%ol0u*SBzxAF;FViV-Lk1J*tE zgq66C26~)?jc*57)G0-wB&-bfJCQ&H6Q2FZPS$MRa2z|$Tj~Q-QE@RD&tQo0%J|dxhyLisfL_Q}z=-+Y`3Tv6{l|2k~k zCC(?H5~keq4Aob<#%32b)iqK*qX@(Th6QV*r! zMd2IWIz$M9h3?e21M1YAXifZ;at({m6Rj(?u^pa1o3F`LOtcsz#^cfMcstwimcB9# zXkODobkhEED4wCDoYIf#C=R*9NRjs;Wez6SC zk^tet(VJA*0>)-dUwy`B2!DaT;H`L|&NE9K0HN;IZB3C_Q7JI2fF9 z3OeJW@_No^o-sKaM}+2qKYRdRuapB8Vgy16UV?A&Z`k+E^@*eN2JE!U_i1ofj>wzq z?(f}h#VE-wAvK7}_Nl6Qn|NO9=#qbw4PPHNrjbt|QErRfc)^`nQdo4g^`v;!zz+2w4CpA)`+d z_DErl2=pPeI`x$?{kHs&A#7c7B{J^+(Z=~fxaPM6jv?oVHeZ#G)#8GQP$%)+I>g7p z!>67$cL}fV6>8j@5Q>Lf){U|m6jS7Rr+d)M7#--$b&}XPwfy3s1@)86;CjNrOyi85 z6Vk8QnkAmtc5m6wuLbeSMR;H)0L1QH!^yX?)$QjyvO9!EIPK6Rv9#eE$T7#ULV%OQ zyae?Qe;X(%tZXLIJ&57({Ih$SeC@GFX0xLSIM+X4AHk^aCq|J0o}`u@$x%jLC5p7; z6d9@x&8Tf}vdy}}pY?*G3+KY(-a13Lmcek{KFF@m`%?h7ftMukDl}n7rMVz*Xnlnz z(&+ppl-g{(yYPWI154i3-NoD#YdP;?=vjaRd56sBEmzYTa^Mj~41BQt@va=5a1{uY zw>7BujKjRvnN!=}im~2RI4cmbK@aV;4cw&EAt>Eo1ye=)yj}KCU?(Vx_0*UtaOCM5 z8fFk%v%?8Xxn@DDIZzzv30PEJN{02S!lpzoS}aXQavVcvU*NvP->3Wb;#(leBMM?` z?^-IgOZX4QbDdT5cQ@A%j5a7uR}GuoZPefk?Uj#~H8*hwI%6;AJYmZq?{_A|I)Mu{ zsOI8nIN-E^J#X@&^D$enZUH>#GT!`Bh%F=`RFpnVBar?vi|(DY;9pDvJefjwO^tkf zOia1bF-o6){nz!KAE_-&NFYb-PL90Na}eG2K1R9rI>Q) z>SxTbvO2d42!3zjUlz^+v%gGb07iZ#rp-F;Q*9 zgd?P0Fexj0Mw@hy*&GOo5Y5pDw;T2LslW3a39p=JbRgiDhquCI-b&{wZQB^`3q_}1 zr~m(@m+B7~u;qm&N|l8X;@@5Dmpg zJ-%(YdB2{n=f7a}s$z0;GHfM~ifv_4OZpB8D$>GLI0S0$Id_hcl?P~9AzcCn@_86y 
zS1Gbu_!H!49Q5^7`b2sMqPn5Kd;Rb0U_^o2J8l8JTK;lzoc;t*7%^wKtLU$g`{}u) z5WM=zMrfA;`-bPL>skEUF0tsfU`l^c*TKmRwrB-Fvlr+Bfi+73UUC&09BH*E(tHI+ zEzgVa&6d>H9!HLHOT6#6GIjm&7h}3kiP}MrF8sIOcbVKIW+sVtG4uCuy4Cv6XHk(5 zEI`P7x@|#Z@!nlC>K;oDdlTkW4Aau`pN5}-EyDw=p$IH& z$^oR>FXZlrXFK)dWZg?FA?sD z-%Kyi7{6%8=fk8p=MWz0)d&kuxF=38eb?hNv!uduq%;7bRoEMfE`s>V8(AZj#C{Av zw1;$&A2KPwd5Hw>8w*kIy1qFA^=SH;)3I*L=wqIk!~tj(P1!uPuRlKn#bmu2TC3}} zPmCVarmO%aI@n7b)D#Zrs>yA-F8)X=$KdCOurL8`1WUbHo_34z$2{xqrlVIyhNKq1 zW{k-h;n|5re)UAdJQCHyFRK0n(InAg#*+-(9`FIA^)~&=?y_S`jvhxsSK0vB#qVd= zMf^j|8ZkIO6`(Z4(gLwzuHox{3%PT>*r$T(vY^cC5I$+dQ2UWcbdlqp{iIu=H-;Rh zo6XT<3)O=hLr<<#u0^w3Fhuecs&|gG+H?uQAe$eQKT^MtPso5_`XDKZmI zduNN2%Fq6JY(%^I_T25Gs3?~0!|`G42MK!?jaWYm%(EzxL1_C52sg{pRsoLdk4q+o`qAjBT+%o z;Q2mquEpf9Jg7uB*nCeFZYp!}6h~7}4W;IF7&!p14Y#4bhy|~oIy8m^7UJZBtj)k5 z{OseQiYzcpcD3j)wWGFk0*TZlVK< zk(F2|Hdpe0?y*B-IC31Lh$VQ4cq#W$Q2hSU<<~1c*TWkEDt6}VZjP&M2hZmL(n3>& z6`3^F0BG1m>nEHzrt>`k_V0WSIsN2i=4v3GE8v&gKNvb`ME=-nc`%vy!m@fbUe#4& zyx6eMjpya(7uUMTUJdy?}T8}vR^l_PGJ=NKO^m=zp??hIvG-kFt@p! zUBZc?Zp%un#Sii}Aoa5w0(-oz%v3_m6@1Mg0thJt|7S66o}dEem*Wx)J1_`7Z`J{C zp?{AveW@gAGi@B&zTtIG#O8-U82hG-!NSMX_DnAM3d>@FzbC@Z+@5>g20b~Z#wgPX zF@LT=E{GGZnYuo0Ubu1uO;KpL#=k_|r8P72jZz0MG$TL(Gd;^+9eoTLu}50ub#t}V z=9LZXhgtTy(FaVZKEMs^KN9#Gq-OZ6f@fz~U~VJIH|d2+3M)$qj$#%D4n|>(=t~g} zh3BmyKYJw<$PZc$c!zznRdsiY1mMl3XH(W$hkuCsPOZ!XSN~dNA-535cqk=5h)no_ zc%sK!4X*o4o=Kfta1A+|SKLJ<{bF<)e@37HV1xGDcF2~7(Jk0%VMD#Fa(5~*eY@V9o{x-oFh zmhB+AhC58JB4JyW(}`K)j%CRA2{RBV9xVa&TBsc!=cFPP-pSo;Vqw$K>06%6h~J&J zpZ7vJwe;7U<7mWKtcclplFwnkmi2@_Srn{s5an&;zXcq~IGET%`nUWzVAf#|c*f1^ zpVwAJehKDG9=FcNbQrZS$*ICK%v|l-h+rk!OYnsVGZ+cg?9bf;u`1DwqU6v zgO8@mpYJGt9uc^-$_&H73*5tFA@u@)G_9&;tSbstJIqeG>Qb34Ua#v?6mZtP1B952 zh@xLu<@%ig>EWD^Sh4|kkbwfa6Zt?vq`(p`N>-goFV>7b=VO^mJ9Ka5mD%rJ$tk3c zL!8DY;P)oF_s|A_;lig-Gv6ATh88UsG1^oGyDqsF{x~Xz6&F!L{d=DIGUR;0(TY*} zI4d8>+%OQBpFspNWLvA-J@_)fA&xorpkLOJY0rpQ0Eg>IZQpaYj46-C^VZge=`6;R{2vQz?g*|hHpu-jMnlkhDqvjd%4%K-24~(ol#J-enS;5rGK}t1? 
zc4QSh!411h$lu+x$4V%#<}x#8L=zx;y`@lEU6hWiy^v#*?)Yi53d;Ri^8r&|q^L~9 zpwNbdixvK2bt~pqC>F>4upc^y2lBIvG44f*am1dDvLzD=P}U_*OgaoWjLAs^UfWT- zDdtlA!RKTj@MqmGtJg6=0kMaR86o4kQq)ALGT+^(GjMbEWQnpCz3m5B1`+kjA;q7V zYoAUiax)m2RjMb!hsPl<8Su7wi#?p*THH_-wDVztxj@Il!E6cSap!!jG+CK*`#cZMWC5`=P{*#Ek5b0L07bW3m&v(Cb4}4p2h_9>%XO(P&Qy-i-cI)Vv#I>@ic``4SR=Gh< zMl>jm14~$|oV!^k7);diu=&&Rx`=cIJebsMy1Q>F{M7|bqWA9`PeEFX8nvm0$UFm* z(-w}GZBMx-C^^x@bM-T(U8d(g=a)dJrTbg*RrKKn2GMMrtR5U#xr$AQ(K~|>^P>c4 zDwhHI>n-&``Enezt&i9qyyW0+(r$6sGN8{|jU_| zJ5&&;w|gLFR}hV$kph>SGVL&gLV+SO8rrsU`Bzr)JV7}eOAp2@$WiMRe zeY4U!9|i=+o97h$G3~y5-TsKOIniJ5+otB8VD{Pel^P(jx;BPfwmzrsmK(W-sfO4Y z)8%NPm250E*UdTkJ{3}OIE*-?VIB{9Z=#g#M__mnVfmB&cJ*dPjOo=kSV>t+!IE zb5!^%T}Ys)feYvGdR(CNp6`-J`69+<~Cr?b4`D_q)xkydCot<>@bL{6RBT6slpZ{J3s%JhBevzHCS~*#Hs`RWI z1Z*mr-Y!7zg%{uV76Y0x%mADrefzSs;f}218Bi0dl<;&A*-cYMu)f)_jZiX)kZ`O8 zf29T*Akj#TpTE7dy~zEhhLzm?EUZV*B$|AyvxvMmp)63v1X+Z55on&WCfEF~iK9}^ zDr(Vuk9Ld`q1!GVJ_xb<3j{mnw%m5WJkc`duere!Z2v78TVZ)Bk@1!hy-rPltoJnw zXGz)l!9*&>u3Gi=*pTu4zRGzkuLM4i&nDY^4^JfC_H32^JEul6v+h!}U`qTLN88~~ zIE$K*Iq+MiI0}-&DaLcd&_qkV!F^f`e=x=0j$Nd7@nQ`AQHu!O2LN5fNQB zjVHP6HK{-m+^nZ?a{~TJ(KBI31ZflZ{ynIbkJ1UpEW%mcbP4)N$bTf0BK~e3BQvWl z!MxvnZRWxb+DA#sW~gvfGaG^1G&9~e;LN{d&j80|d74YxC@}@xHoFf4zF$BD_POot zp2n)aM;>5FGS0Ps8hc9p4{D_8x?2PI>G5vFt=dQvU7gwfLt_>CEag|O9CN1BUyLE0 z+gX9AI{gg1>p!ZA^ahTFk?`zm;@Y3;)=BABJA@Z`E3$3_PTz&h$O=~L_13e~Jq|5N z05NscZj2UImlQQ3S>Q^QeoEJ~9-Ujb$SF)azW&Tz)K4<|r`&Gmg1MDl(iFW@Y{xTB zAg}pGEsbvGw#_Fd%xgw94Ui|58;tY90a`kLEcT(m^sAkuo(B=4JM~5V=>MtJ`kU@! 
zIfGjC4~{@D8?H)DEqvBp>56GsKDFn}qF$~!4vQij(gBs?*u+B>@f|`n^PKI8fqt7) z70?P$^l+*gk3qyD#6!cmh}}V8s*L!4C@k%^vdw}_A$_@zl&ih!Jp*?nF8-^;+Tl<` z0=6rZhgNhv{XfPruyzL4t;RuyGq)rxh5~4T6_F`Oa|e8rPCgkIOXaThc%^ftYk*M= z&7?1)G;C(o+!h9%l+|RFKydsb5OR1h44kv-cYmh`&vIKy{)X0Ae?X{ zc@-gNCs#1RJb*a;g`-Bb2jY<|0V3-AG}*v=LSS6pf24T;BEI8g_euQZelj!bFxkg0Y?izXF{z*0zv?u3LqHOF_ zuE98o5^Jc z$InjSau(pH-onpR}A7RE3n&Q*Y&-CB`czh5O;0H{mfHg2XmvzYybrp*gQ+P8OZ-_D(76D(0Z zDz2WVJ+fhy)6ZSosTi*9Tt%%o=?%&k^TT3Ldb`+pn4dt|bUdMg{-+^9A-YR4Kg;|I z0tqtwOGOMb8i7Wug5%fOU<7)E&YN9o%wwzUvG?3j@n)mX;xQF#t1?fmJ=cFnEio;a zyp=M2z)x-XvSPqG@Co=>VEJ!H)S{fAf$|y9sSerXHia^nnN*8?KTcWX2JXD6J&x|` zRCAIuz_<6pqPWz6y-V=np5CV*vV){)RDbs}H1bVb!$4{IM!1%u%=0(Zr`q#YhoeK4 z>11gTXX_Tyn#4`|!_%*1zXIjq^Kj^><~rdLLaKVy=25dNHRC`!qncXwczX467@d=^ zhB1$8M_NqdVstO_5XvTG4e>-YQ78taS^>X^G%PUXv-w|@;@w*U4$cpsj_rRCy)_BJ z*=40W#zpD#FsvneAo76xXlt_(KQ^4i2M7r9iV@tD)uDyrTbiqjuNuTw{T7E%HD#8aVKf5F1e$(KPi}B(^YIt7SI`& zhJe%5=%Kx_^%!+`bf{oI-5M`fVZv;l5&UDOlIf%15t&!t@f{W~STN>8Sb#Kx5zpxGp&S5!f&i8->s$Mak&nj9$wLC&v&p$66by?j(##;4syOYXlJf?T?9G z7eRYyYY&(Po&=TctgiF0HnnFP-&^Y63xd4Op$5nf?L>7)tM;Nt_%vcN0=@hwm^+(IDq z)wMSpM10-|+HS>0w_~{NOx29&t+N+Utb^rLX9_!|3e_skR_DEMW-Sgx;(g5IvEU%f zF+k`k+8j`Tu|ihs3Er(I;oJL-X9UeVhcOX;H-p@j%~X+hH;r;7;ino+BU`_c3+Go@|R$jpB4 z*5zDEm;24fEio9`QT8f^>^Noc{nzuCDr|YNS9vd4jzgxHqTk!O9%6SYM5YiqAE)my zz4zX6{4!sOY!p&`Q1Dx{QN*~-r_DOvux2O=G>O&;u*fX~0QV{k;kUVdSY`MhWLIfW z6?~_0_k%+zz2)a_>%FI2e%@T87D>SmY^Qz;;UagZr^B>wGIVeY>m_tpQCzsS@Aa^f zba(#Ru*$ZwzzN0`=B4$#CxdY70)l{1x&*zSjqf(3R`&P+4m?G!F)NrI#2PbA@KzN| ze(<{4{iQhq2)G!R|KGM$0N`8em@Vq6cneYCt)jjKqATlHMmarkUf-Bs3z&FjqGeN2 zZVybCV5bg*qUOt&;q}Vi_~tk|LkYrNZ~W?si^5h6&Gro7DQa@tjcy>=q#>PAx^Xcv zLW-X7mATU_8k%|V+H){{0g?HRBw8S|r z>WBXGK_rxOn-}GKM5qm0N7*o!_@&r}Kw5FTd? 
zgc^ivKQ}RpJ+pMU^Pcm8jZ2d|pjCU-wBmKV=1kgJPSF|MWa>k`c8bKrr8OyisWJsD z8rB{0H(b>k0dK{sK413HE&uj){)%AB|8_eMWlRzTN_YHgzhD_vQv5fI=q!TYVLV7e zTmU;oo$Oq-e1Tq~>E+3d>(lC;Z@}^@ zlvX5N-7JhW4K=EsaekZCV;&8}ga4Z+pWfYXF`Kr_kX9|FyV1=Z+-UjZK)tCYUqJy5lny!*+`8kUe=NS;k=kxV-N3b;#s=e(&fmBBSAsrCqx z)c{BfnrJ9@H;1DCPu|u-M&_zc&B1Ahl_HxZ>JUb#${%xs7b3-}WJQ(An7LmEQ%lgj zL@beuo3tsq01w0qVjS88(C!3^nY79tJz^i4>m0zZ*8l-GzX^fCdw!fW1-9{7^wJFMm5y_s%YkkA4(Xj=>uQFd%zp50oz>#zMoT+t9{-_OMM4 z4@Iwos4=~jEMQg8k5vRLEDB9QzQ^jW3AR|!Z+RuXJ>OF@UYRbANX*%29Ehh)VxK!? zKvebtx-P>`yl|`#X;sW)^NJA|8$nU-fwh?%U4R(iH=#T7Poouq*Vb^)b2`0ShK`)% z3hG~V;4g$-LKR2kuf;3sbKikOjP9gNZdHgx&->zl0Eo^tb*qS)q%X84?0}@1wA3BV zh5Hd>2qXN$`xt#(IKW}!dMoXxfiQu$2@X<7PXiwa+-8dMtAr*(G~cp$5|fxSt4$Mo zK%mUhYhT--f)Sc3!ghW)A0CS0qU{|fWMf>*SvQ34>T`&>D7zY7bNHS#g&DSm%17_N zhWrP`!TlE04%qCsoT1G$8)8A2NZx_qC`z7~OW(yt!Dbo(B=+es(yR##hW_>bkZm4Z z97@gK{_Xk!3E`da8O11dVPzXAN8DJj+B+o3gb$ws&pEXm;#RQ}aR)ozGQ50dJty^bTy_`Hw|1 z(2?#b=T;{Al*%FF7|0(Xv4sUNf`3~_B(dEV4C!}+*7uo%w8;MFP`-xLu4te_-aR|_ zV%RHLv4job-bIh7h1r79xY+{mDV)?FF{Zf91s;Eb7V2Gcg2d+J+p{{yI@Z#GGQ4;C zMr|AESUklSth}6kkOxAaMhYS@<|Uyf|FQeFEj#`0HQ00yTu>7bq;3pz+@d!w8l|~O zUb&J%=_ESTFNZlScmsfM)_F4b#f$a3WiO`>? 
zlbe?TuV#6kzVBxk4i1AUO~qY;M|m3Ay?mmWc{*f$TIe8+IRUsBAKF1sH4Z3NZGObp z;X9835c}#ht*2xx2_6>kG*Y?5c=yPjo&oz;&-|4n-QC4k*mq#I;e(TVvF{SQx`)LF zrtp~;23?Cyqrf^;tK#9F&emg&m9}YeappSolkP~s*3IGPt1wk$IFT7y9f{r@ zlS8~}J1N+j!!Jlby`K1731%_zsZPPl7wl>&!a(2Wr*Kw97<8+TT@&^XAm94B=2gz-(Ug}xxG=5f*o z3b)g#eVRGP%OAJJk%15q&1)%d4$A zWV`P-D#I@a!&s|uxvHAYEZr~&SQtM4!;v;y966y;gBtZp-|7e1vlPf9I6CYZqucU) zmwnkyc3f|8*Jf7!4RV_6y5)llekuY_c$h7>^4$jCV^KX*x7)SA^Wu|>btq%)LNsVR zfTF6e&2DRhH8NB97vxnop5|$>*c`vPONNPo7|4LbF7z?0brTnvSG#JX4TS|NCcg5B z%GM*TV$WqZ)Z(MMumJS#in7Qq#*zo9CXH`orVr;%Ymk|w>CAT5k|Y-EVLlDPYsjVY zr}cW)xt=NRZH>ltr{*Qa_?gIVhjX#Js~&pTUmlmm*2*`Z&2bbd!jfx725edA_s`my zKDviGDSRu$sE0S;MA5pG97&4s%Y47&D zNaX((p_Samb3Ae9p3H!B50_W2*))ZS6AwNaAQLemBwJM9v2zg+A7`JJ1r&5BnD~lX z#u2yVuVlF%C%AyZga0%UzRn*P1m*I_K;5X5ui1rUc z)U;R!g+F8$fOeBU$6k6?EI3%W6DJO%;bw1rmpG`j;ja-u`M=NI4nEwI&(e_n?Z1Iv z;U0}A21bN_=Lja~xU2+bAzgU37wL)ovj+K8Z8`5zMO1E(9hVay2`!VS^>@MES~?4q zB=^D2!=zotjMLn#h7_T&9#(Szv?Jg8uwX-EtlLf^sNQGln*8W?4 zo#kA5Z@#o2-?he+pohYN&a4O}ERXFLbbMCkX?X2p?Kks-1M)Vtk>Qv3g5&4@L^yS0 zi_o@~Yj<}1Mpmd!u=+H182@0?=Wj6e+kv_yvXU_~y9R3^f=bml zRue|f9Ge-86!cif^|FDP0F*3?TdP{vAho)rU~&lJ5ChnxVO*q08s!R{(s5r@UOyym zuuCT*kt$zfwK@HKrUU2E(wJ=W|MjPmZ){iw978O479fa(ZK-WS>S3~+rc=qzJJyv@ z!JKoORTCO+%Y4O&x~IKxEFO~Nz71cmLPk_eVoL)0d8Th-M>Dqi2>2kLp&d3|dBVqT zDF4KSDq0<2oiTgd-J$*2Vn}0(JMaADG9nR{`nn`e!(5NhcPOo}8&+ zjwj8JYqfP#Rq%`g@B`q*Te?ch9kW58Ds`5OfDKGJRpZ!z^Qnv5u`^N?p{(v>@s=We zeJw>c8d8@@lHSX@YfxEde`DM->8}rZ3;#vg&~RdCG$npzCoAK|Yxm@&h%lp_Hf=Pl zG8(F)2;RJg>d9xelxUR*CtabL=_`W%54!LGX$6nmmxLJ}zZg106BuyN<6QIlv*Pv^ z>*}33dXQ4g(E4m$uvu?;xWUQ|;|9Bf~K(x z%*eA(E92jyaD!8nbjSI4D||pkJq+n9Xwdh_ej>y>Zng_1W#ns|frtwTH_TPF`qE|# zaH&gf(@EGaJPRjRY-&!sy+slozVCot1*a@ zF>{ZDChzfOU-FWtMrGZ4S?k4FqgV1oUd5V}H+ObT>Pk3qRmS9K^KF*H*dFc!yWFOo zCeW!(`o>hjd8?43p~b|@8rfrVd_OfbK~$#GR6Vxm=z!KvU_1>|x+lh8x|}Esbvsmj z_6u&OP*6Z^X`|OiSbGQZ+|1WXQ~H)FJP4mI5->M&sh7pP`0FbnumD9sy1$?$79!jc z^}W!UVi9dp+;N?BFN`_U6^)lPYz_jb{Hd|3U?!EaODp#l3VUu|lI%kjIPaOuGWzgi 
z&?hRtdSWZ3+FS~SwX`m%+^6+JiKQ+VZ1tG7xx%2^bg#fP+p3+3eoGbm8Kb)1A*l%E zlToO)&3Y+l--s|3K1nOELchJdB5uF}65Q6d0IOrT?o^ z(~}{1A#KTnGnbC7PdSL^Op#iT)n)5VYuB;ymyl1l7Q+Fm(}w)15IRuX@v>mr3$@qHD(! zr8m$*$eGwU(JK?9Lf_AA0b*h#yMSJU$sSVbICs5W_0W?Mb-|!WJI#u^kJqC0ohEtI z-o5L}#^Hrbpd4mQGzmh5UIiL@1nVaHIp)>oNZ#*JRsgnh%>*fG?fxW^5tyQyh8s6> z(?E(n`gTsW^9pEW*b-e3YMUOsI*7z7d+oL0^ss+-mf^QW+5yx*!vn_lbeeWmvr0}tGCBs49WZ~QqYRo96i9) zKu(zy_mRtTAYV?8R3klN{&_|}&iZlqLff3k0j5$kO(FQ>yCG1AF+dC3Z0>%hxyZhv#RTDlezWeIt<2ZFGS_-)&fr=%s8vVwL0} zW`$a-{up(ypMV%ZD#%miQ99o$te*$+8)nyWW+}9DO14e38`%eB#WS(rYpe@zdq|p{J`QkXC@C;}9?JsG<*2GqfLze6Hodh4RmxgzRVF;sH zf&QMbHL{zS{6li$2ZhHPS8ctHzIncz5LjR0DkeMCi`)wwA4{U^FFzRWEM+D54%+i~CTQ zH=NG<9TOI$iI@=d=0XDGkjc0)H%zE6AjYo_?yM9gQTA=pI{iR2R06$rA!8yb)R~?% z^cDWcvELOd=_f6{4jzPaXqeusnpaSO{)dYFQay=l44=+*|IN z@vIh!FGJB>C3p0FOJ9bf{k$K3*)S_PPTZ^#ur5rO&KEFkP$xWf`Nqjq;yI#@{9-#} zrpdSJ*?#r0zS|b)r-*|O=r5iNa{aV~euW2|v;ScxP? zk|Ysalx>&fJov73Ah~m~m>$Z#>c{P@#!?Ud09n;pOWlPlYuX{$0CrhE;0Kkk-FWj_ z$PM_RQpK`rn%oMYJL}T(*+V1x^@S|qqp9lQAK$DL)MVQ;0SqpsgAYt>4cd4p8^O%N zIw!hbU!mWw?i+EZ7#Y)aZUo!E41-i1-zG#DMsm3L*KQ={9&>19WR894KN1iHkJXSN zh(~1sB6+v!Dl(irmpC>}7)W!IT(Gyvo}3+xc>l)NFG#s`YEgDC zA9pYq9Tioyhf~F5)r-D83ac;8A&JNPWKe{vpc!!scMk*K7@>ftyE{1o#*QqHS-;0)_^1Ln1fV50Sg~iN zB*aiA#sDseVaCxvPbtI86wOx56ih^+=KUdyhtCVg6LPU=vO6A-ktle*p4Q9GHBl;n8!r)mG7dmeZbZEyOh0o z<`dua-lfOp~h9I<5kvNXY# zI|Abz^!n5`&ODb5=3+;l?daLBdug=$A7%ny9yB;1HGlvb`ATX0BhKpAH-SG~1sKiO zq-JPK?DY*Tc%cMAB(B-=%A}vQ``doWVufRMM8e!viHN-y+@Y(dmkv3>u^+;H?<(Es z7_*{dgk2i9!`v$kn8TB^@_KjB+S|4b*dkVefj4~Lnu2w)Z>Z#q;t>a4PkI|CZBu2h zMQ<&6imby0jJuU&ga`ldYU&rws;z%f=kf9S@7t4^iU~~GR6BTOO3O*zhxB8?oUrYV zI4IKMKlAVr^d4>4j)-SoUI^^ICi-&*zMqFp--D@_u0W*}Y@G^z8O}aqZZ@9ETec@( zFC6E2Tp7_dY(ZJG%|x5osyh0ZA59!U2WnZXqBI=1Vs5kdzOq>5vJU}99+dxrhc^LA zeWp$H$)&no!g;#cE&m{Wy2-Bxvz^qu20wyP<4<$`nyppVIqh;hyGn9Gj!dG(JKM~H zi5WmGCWvk;gK0s+!#eP5*dMtV%DD{xR%F~qD!UySZSbLe>XFnI;wkJ~!uv9`T88tu zZ*3Gcq-e_@NqejN4E~ee6)DyO$Z&o5JS6V+6{e|9+T!Ex%4 
zU}+-3F%IN<*V<}ikfVv&Sl^X3w}n{}AJ0AuEQ|T~Xcjy9VLCVAC0Ak(3L(84&?lAN zu5XdqO8S)L4{)I@K2#I)c{^Y?gw1Q7wG?SW5-&yo?8h4^oF`I)Ocic|W)P_b;kuHD ztaX_ex$^GZG@&kh3EuyCNFoLEK@`(!mB;xJO!hD%dIJZZ@}u;=W|o-qw;m`9X%5Gd zDl{0O?^>^-$5L;)p@?E}Jo1dM8srV%nLbrF6~mB8NRP1F{0&CRt6IY(5P)H$7N*PtBwjt%?x?Bxa|;dqL%ADFwMDgzRk z5^9fkG@U>WzBDSe!L42E1XisP{8_n5<11F|;PIG1jtYj~O%)V|3aXFkW|unA)-L~! zrGSQQM>|@-^-=es+F5(&WwgPb`X-iCz23X=o`^tmzNKHa_715PLw(pi{LNJ_O72{dmELX z0Q_w0nY^k#$#l)Kkp4u`7d*c_A!8LBvG%V$ULc;4qSTmqRWsF) zxK-DJz2JxUWWd{PF;PmAt#8XQ!ouls9*W64)yf8TEq<;MWF=xm(r{sJvJ+JfPa zy+H4h#8i5EHQQcc&WH__@RtR>=e@Zd_*C$dabC6G#bSnN*h~U|R7_EP04d0N%KUCP zsLKVzk~s%bd{M6*rX}jDiC5WHp6MjS<^Yak&-)aw2tYk0!>$_plN?hHg-h~0_bMD+ z`^+{z<&NPW;f9#FW-Xs+Z;7g@KMTaOL9<*W<&t4_?q6)=bOu(bKtoeFU0|r^33K_Y zK?`k3pO%O-->FB2p2W^oQGI)-NXI*Nb;|;lZ-iC)AWl}mkFBiC3G`b}86!gH01oS&r{)cJ`db%+ICbOXeT8^fQ`Kq~41ot6%NBe*b^e1+6cDO&&cev*A&? zoTCtpDt`g!41D%0RBLDXz7mTfj0_fZpyu^EK;+s{A}2y%soxjXRzO5CEDap^3H}2( zjY@6Q0(2U^0{SEjd0XPBJy%}iM!|D<79FO%lq@;F)Lb?y!{57QOo>?dgvSCd8HJ4Sg?VMf-a})~Yclc0 zZ-7=cU4uLg5O(_o7M)$Zz(uj`#+fKmk zCA~nXwuW{Ei?RQJRb2h)_KUL)P0!5u{Mdz(@e%^Eta1l8Ly-5LCNACy$6kSDfuT15 zm#YGq?UeOL23OzNn136fO@utJ*4+paWIN;)J&Ih_6$xr-i3 zuP2KZi#5@(GUB+WiA3 z>4~Qa4o}pfgeAQ>d&s>bZmM=8*$===xYv~Rl+FIr^sDU?3^~nGOJf?wp1g=9WFw=_ zA9+q0dGulRkx9t_Wlf5k#~ar|`dnwX;&0T{S3G6le(6}un+6T}8T#u#ZsD-((lhS2 z4INb$AVH^_k*sxLcg>nA4A6jgCIu6BgBWGtTDL%8rSEP?;d~4u4QeR3f~*WyHrE#i zd&b4Zu&u$3^PlB*0o2?b6KiY%AP2uqoL4`=Iit@RLePh0(vNLJVxsl421NoyND2X* z_XT%+?N(zUZ(!!`%G?bxO?B@leOaJN80#r@p14>0!1^;1s8{bN4a-;xxecly6$dHUAxo19X~W)azMSO_Ay0ksi+L;}#VSlCVt+R$~t5-{@)( z=kn6*x$I0#;;;6pn!;awt3Am&=p?2NfME-U^`SU#EStmE-qRAmBA=$?|3JVD?b z2R(VX^q4F}Ilf0!p1&ax9~WO+Y>bfulN(~xN*9Cn8Cjt@DtQNGH*Sp);_XW7o`Tab z!FsRgz^!3#*p? 
zZ^^o6V_kbLrt*>{;b)63*tc%ltK%OtqPt0{1EZM(xS)33=1-uUG5MkT6T?L+2yvY< z$4#YXKoUoy5{3WFmBK!DITH+KjFW-I=nZyCu`({4x(-W_W8cm+v5$t`1O~}wyAvPF zg+KVQf^J^muF6q@Y&mRgDFyj@+~(ozq}eZ_b51DE4n4`k2-wkx{GS^PU^`SgI)h0{ zLPD`PDn;-#?V)t7#rRb7u zGWKm|KxCSvPfJBKl5qX$=ADF%5S1<05~9N>{1LGtX}q@0I=o{wfoj5(-O19{qZ1(K z#g2(iYlIZQ5n9o3!$Y?zuS^TA zVZX1MWzksh!ofMcua=!y91qCkiCj`N;+a;kRMw{*U58jntxvLVKuQ9Vj7~^2caZUe z?n=ap9g8#)e*K#fPv)SwW8FQGvIbUFgB#XXbFD%r{C-o-#@*f?30M2`=mxzaX_E81 zu?E)l(Q&QJ0FKq#$HXXI8j^O@PR>GP!8N)`VcI-Yxb)rXiKzZxA`rulI8t%`FVzN)>B1B^26Lq~xGSyEPYumPIHuW6BJh?Wwg0{{DuBqjn`lps7MuB8 zAwwsU0Xza$83W7=_Lu$+ioTw=2@Qtpst4jS|J%Va2gm7UKP1czNQ3~gN;T?RP zbC^d#V}Y!ch*}&M31~DWfw+rdmdMo(NV(UAQ#Rf@J@5ag^Sg_<EavY@oW|(L4EE|q)PkUX3OCX4EO8i~d3)$^ zFm3rR@Q{Y-H)-r{{I9v8|bj#;iRT%m!Ln=@ncT8NiDoo zS@{Cu3YwL!=QS!1ZHad3oej$C-Cy1G7tTZ{jBEF#?K&oLr0cWm7oO@;BLv3v48qp& zcA!)j(WF6qI2m=Df&MSLDGduwzrLfgnbjpJlG#r}3*5`cMUx2hD`-2r7p5uCg!PFG z_f?<1cfYgMmL7Hxj;`vo@^0Z*xQ&Pjs(v6JBgk6Zh#)Vi=lPGalp$?7yFNxh`%F4X5IeT)_$o2z9@{?f3%bA!a|C~2-HCUvIj1?w_s|} zwj(O#%!tuYPT5JD{TbVlCU+63$2df6RRK6u}#*uPNX}f{K zyllMEdtH<1rL$cNf)@H1IMZY5%;CNw1XGSKsLj(hnTTQFksJQS>V*IHG)>my%!mad z3Z|KX;%b{xE!qZvZxuL*@8zf{d_C`-275oN@Lmav;4c#bCc`{DDu1C<2e{5*bu9D> z5RuT&xb-_Ja-OKOU43(*o7go&uT?iJmd119VrK`}WL_C+5OFu3Q z++~L08uBwC3B5uI9V{{PToS+TH%Oq&TcfpX(ME(>PF1)Z<&{e3SL1q0h@~C&9v@>! 
z5dVdPh$@puJH(PF67fxTM)sZ3%4lVNWeiS}&{`U=>`fzhFN&U1o!wF5hwfxya$M5`y3kXib3Nu=d5D*I2*7B8*fczD?3Z3A z;6`^<`|2ni#mQ3WTq@l4$K+ zlKyX8-L$lTl?0e)HAi9cE&*5*+%%?FqwQu0A z!KHg0Beo0PP}blrFk}$B7;gfk`X(}c!RKvT0Ze%tSy0VQoDgx=g+61_9;4~Ds+G2aknP)+^?17COn7P(Rn%JHcfS?2dn+uUR z!qT$Kx(hAzdS-{@Xxsw*Zye%UjAGnv2zM7m(9Pd74ENS(4U2B;cNybBEbr83k(^sY z9~Tu=?4CnbX9~o=X;*CrUj6bhS2L`j)k`67Jr*ukl0;}Kn-2C&Z2<{D-@wkh-L}w>mPWr#yWE_Whzux5;X)H}j z>=MYOlG4JuG}-_6c=&AlCA_CMQH%p10mnkhBj$-L$m}vI34W9~lima*%Qgd&z_=QX z(LAt;NKP1>Yi^E>BpNe$0ETe>m`ROzrA@XOwA94CZ|eYZ6Fh@$@N!sQ;{@Rk!46dx z1nC3^SE+oYYAY?AoJNC7Uzg$IY?O4(!bYR}$Ej5816$bRYqcd8NHMyv>ej57+30qd zjwWc9rkK*vJ@goW9TEmTC*Mp2E@ehRzzk9zEB)?WOB`tg_Ny#r{t5A`7kIp)Y#@Xa zZ+sg&x-MrW2VRCJwKJ++Q^jyJ+z*6jAA(rXR}<<{^o;|7WbFwzmwDfK{Pk^o*j1vb z2;3311g|thMyIlo!5i84#bgmW#J^~D24>_v?BG#0JLtqM>eWES%JkVV*)p<=#D<^x z))n^TT#`eUYoi%8F_DT3CZ|vvV7AiH(nOb9@`Kwm!|*wN2m;=3gVA%%?Jya6zu-z=%T#6s7?cPRx%sc&oee>7BI}|@;Z>8bxzagpGMuFbGK$>+O%COS zHuwTyu2B++9bYrpK2=;;0*W`O%jYH~6IpDdnKSg#;HyyyJ|~~)ARLr0trmd#_onw%D~q#p4+_UMk2@M5+~+BToWR`zLhmvG}vf{I78;5s}}0c)Ge_mP+0K zsg(tzP*w6Q_~y%Dv^3!3=1p=>c?eUBcm?xPVO~j)i-w|&IY9W`Cma%BOm_L#57VZ7 zBWm>0Bhmn#<*C^OtikHSCOB)yUuLw4gbJ$nf!8pxBC=AB$qk(5^4pldo{xAdID^{q zazA6@%mu7hn-4KPa5I@Ec5C^Bh6M|n2+~m5oB`9`5`Z7A|3r)~m2sfkq}9N|%C7E9 zk*UiF(saTuifvoOp9ISbPu(Q(kCeX5qMpWLw&#b;`)K@*s$?G-8Y`vV#7EXpN_A+= zM3AVFnp4534{pCD=_bl$M^7Cn#4+Ri0=%!XDP(sV#yL;RX#GJ~uG(bxd2~^6?d0+~ zJ1k68hN7V3ueM$tuT8@}Rl;^GDSAw)Bl^HgY;icZabeU{i4aa~bLLD%o1pVTN!5~G z{UIZd80)GX(kHABKmMlcA!WQ)5s;kWmt9tl(=8yk(Z7{@d%h1;o92?kRvOC8xA7+< zsJ~DFokPNSn_5sf$^#yhfEgIW=n-Ms^h75gjLnJS`GMx~MUg3>w;U@O0-p@;JT=kv zLu#>V!GffqBpmV$jq)EmAcwngTMX?F@82k!99vLepN=3pot56*h+5=$1`uU<;Hi5~ zd9&#hJ$}&9i?6Th!b&b)duaqp-*jSNd%fsEM@-RYP;tm)t5RF$6|Df}ro@+W%$4dIdS84yi=rYv z)j5+;HgCS;+OP4ljv47Lv>8^&>R6s*5BrUBfosHaIGd?zCwIMHTVrN^Ktu?XpQ;s+ zc~k;)<^R6FM0MABE-CH)I=oSwJE%8kBjs*ITY?1`Z&omK2=p2qbjs;*8*EIYfDhnSZZD}Az7(EK`d{h`PDe&ca$?rr+{M&Lj2B~YvT z>lU$@E5Ovd|7j){Dh8wh2{kgOo_Y6>;@0-q^n-MK)?+4y&|NMqt6v(Sx&7ng(1d!U 
zAM&IRYa&o6VBt9B~J8`>iD%#&z`zdBR5sO z@CeOPJL58+YXSqT1lC4! zhSj@M*zK%le* zjX{!KH!5rzN7V1G@Lqm$-(QAhD+v?*$4;9{ z?`y~)Z&Q)76{j8}2%Cv$#xACvU;Dy_bch|znVq5lUA5y@FJa}>2pZUXFTA3pY?Dk+ zz$>6&YeP+;Wpd*rQT31ptmk!{Mn9c1%)9wB-(eJY+%h>+l3bfucDZRFnW$@a`@Bm> z;RB5(8vhb$>*BKgH_js5^@i4rse((fG#$U`IbCU2XE6bas&Mul*SevjGyAgaz5&~w z9N!9GQ6tyW0**W5cyd$046Cu=QJ?K2`rj!~U8+M7^7iMbmS*GLObvIFo*GM#=U~#9 zGwD2U3HlxbqT#%|tbJN7Uqm--p%`zlD#m}nHQM-mYSe;vQwzXAMc3qw%f)jA_MLEy z^ThNzWTMJA(_3noMmE&%qbD`Qhge;`{Lrm0H zy{^Y#4OWzZ0eh$-`7=yR=K~$lVH}!LHp$yDlaN6Pl?a3_Kp_&ls9%!~mFcBDW1Y*f zb=hO~-gcza_U}9J*nR<_otGP|BrjGtNjKTsPR(9W4Jo(WKMq|Knh^jPZeSA8uaIIczy0-O| z+?h>lxt={A64H|d6eEA!T0!8?N@?Y~ifH=Yt7GD^smM+Fxo-e@W3>odSgeSlhD@$` zs~7w3`a^JaXF<+J=!YZJ>*t9;|?3aCj1B86=!X zUY6qRu@-TQQRfw8Zy#;E z1LMaD%yt`20T)`+ZrRV@{Z*lbadjq*Sfl%qqRs)*?`G}!i z{rWe7C@ftQ%aGjeH8i*t!GHiZ-Zg$LE0a!II^f(=$3<>iQT@j7@Qhms=JQu}g!Nnd zd>J7{3yeU1cRNPcWCsQpOGt6or}(*|L^sVGY))5X^e2T1;+~uo3Y?poNYu<|eBhLr zySHJ4j!ZBoP9B`)-n~ zmnnLL3su&mW1BrFYO%h@=nM=yF;uD$JMAqX8idpe_rW9|952r61J$emIJl5UwWYrG ztDqH6ooCMgim2_vSVZ0Ti!-^*9NFjCSR&=yP4s01u&0;NP#+b+nh2Bo0p4xPoJ)_{usp&%&`eR@`~w7qP|sBopK z_uQQy_+M0AGn=Z5s{x~dY^m}z5TkGtT{#|o4j{xYNXh%to0#<3)r99%8(Bss|9GNY zuj1Fft9DX9@CQB7$&+D6J!dy=pci^X!5=mN#*{i5DYsA)>_L0)-T1h4u-F`5y)xls zFgPm|NHSwz`*{RY#dt^HN&g5Iy?Fl2He_gz)y4r0JkxTz%hZ?jQb%Tfm>ETXMmMok z`lJb;Lu4}2%pKk6CUZtez$wWX zSf|HR()`)&7z4woBO^AgV`a&2jYs`D=q>`yHM2O>Ped$bn{$(EUZ=Za3r!wSc`dn* zwi=+sF1A$@N{=*zEvCWVVSB74>aA6uy+ue-cZ8e2JtQBn-a~K8KfzvM;uPe+?TJ zZV$?Ubz5Mm=hOn)N5OHJjwwq+lA7v*vzboxTMW_#Z{Y5=Q^<%b5}WC(H`Hlb2VnSe&Z&HW!hj83-9NW-q97O<;t+=C7Z?ibKw93 z^Zct^rrejISrVP)EQ1+CiT94(yqek>@Tv&%Fmbw?6cHwI$#IPpqVB^@P3B^#ww_|e zIAV1s)6YI3o|?Bp;U{VB+UdaOcXZttT0PH9HpMr2M+yoKWGYmMX%&3uiswhO!JlWe zZ;1{#6p5ML60KYH0^@a+6)kAwEXz|z`H^*szq8UMP9$0|6i@q|<&oUh@RYfWP?3h4 z^-A@xFcjl8gy6(mQn1PYFS1bv%evk@M8+>)AGzI_g;f)XqtYa5F=(^{62D%nC^fUA z606;31X+}QJIt5R=(_aO>8lmn)d(q2%=?Y}I^hv)oT~w$ThGTg3t8g?C7dGlo`*g? 
zy7}y+PIW%_TyoC2(E(6M{^*lmjx53|OOnM0NjmL`YB+LQQNR;Jv(bb>kS zcGw2(cxnL2wP8pTNG7H3B$l|yVQ`s+ceZ!}YDfJhk_dZ8g-B?i{^7Ux)E^W7xh^~y zAy0K@o=~L32+HHY2Rd>*&JO)Tnt#xSr5pG?uIK#8LrhxFVUVE=I)^4w)nydd>1PE#HE?E}_&&T)al9Jal@{WETx6WK*?tL6y zJ2heYiEe3mdN{^5fm43d^DtxLKDQUGrAykz2c)3-cfY*xy&c(GHzZ%8I5|1J2Pm?i zNNYah=?Q#~&z`j6&vaIboGiq*-)P@RA$oPIa}5}wf^x$>ua7{GMuK|3B2eyFTH%hm z@bjNlC6ybbgfbORL)Wr;V&G(sL9BgTI+<6>nR=DN4-BFtgh>uTCQj;Q97c{2v`vEu z%8v|jSsHS;TZ5LXJif>P5jNwG==DmS_o(+-w_R53Jg~LLX zXTG^nuiTQqA&4q~&K+<*Rb{+pWfF7?bU;0JAmfS8cSrf>&5mV8;a2_kIe3JENv6q? z54GnelB}Cd`pQr{CZ-p7@5{`i%5R+M6nea-vyL|p4K%{h^X7eiZ{r>9mggDqB?0AYHq@KWU1FO)Ku2RpHk;nq;P*$ zfb4AtRlk8i6?EyRB9d5>8CtkT;-?a=y&ihwxj@LM6a|wT<4f(MpwMMOxBXsim0YPk z-89PBL_<}WMkbBzENSK3Vd0irK6ms{VR$`vpQPtgYUo+&`MSO71I~~*Ra}kZa7W*a zSHEx)$ZMrkCZ)%$$_*E3Hs@kDc@-J!fBwSC|BMKF_R|Z^(EV>90~t?Pyns6MH#7QzCD+?L?7|ZA7??#Rs7O-?%^iY0mIy%viQQ zv7;RF0=T(eh)2uqfyDhQq&6@2MpBceL6uHa4ZWRAi$wsz@@mF9q=B>xfSymv}dD>CVN9hGysOS+MNJeGT~n) zN!>;cr{6r`uIcrpLj8u;`)e-2VVSmFslyjMu1&O8q3Jqyzm zV*wAvSq2(5S7g?S6z3wCGp?SNPeoZPLU55+GSy<-p~QAA z_;&7rw8RDIeGG=2hCeWZWeC~oe0w2?dp{1a+YvWgu+!_FEPnIb}6JS&84(0?tODpt)g04Ov=R=RgIGZD=b{QQS34u zFW-eLIl?v-Q7VcdE~!V9VZPPio|zC!x31u6$cEaQUTz+G-8kzUK?F7x0VM@vd5}&jBd`EXyepWyYp* zJf6YpOC#bI2y~@C*#JY}!K>%!vU()ESIY~22T-60AYQMNb07mg#I?p*v^aUxuQqmy z+YCTQ*z3@dtZU$SLRz1cF;z=KL~Unmj|3(sPk*g=nr?_ezD{N^zKn*JI3sLiQlDOH zhN?+AxXpF6j49GIPT#R6)+NuLATQ34ad&8xOAhK&0C(virq6)ktO9mu% zd(7KV?>$X$E_dcHfU>3v1F)K36DM+{Qfn<`U8U)aKIGzMu{)w`gRLBm-CD;<2h{mJ)uUyJz$+{0dYtAM z1~GJR?(y{t&}6+2W^k#D=EfKe6^<)RgZV+ zcgbu{X~Q;TRDN;UMvrqw$m^L6KQ0r-=0Zjt>Y`pcJXB2^u;cRlcc7J(ol^l-k8hEK zC(_KFcc?7JitbUpwVG^wh>&r720f1aAik3DU74Z5NF>CVa`ijVDoQtJIia7R@Vqjw zf!fCKOyp`>w>#b$NP`{Qj2q~YBUV!4Wje>BNF~{*FG2o4wj3ETl5^(54Ks-YKsqy_M{t{}!q5oX6FqlcN4GYLP zB{?3LHnkyumP&SOaX6fp9S(_TEl9{q^ycGJSv-4Ou;LOhKn9{M%A*(FI>9@^lm320 zRvR?9Fn#bfw4RnUxE#AtY+_H!&YRUudG$8CESM~hDR#{v?Fg$pD`ubJI?w4<7R?us zuwrnb^i+|6pNE4;qMu2K|4GFI%+{N}pf32oP>DklS3e#WL{HfJshiC7E*wc$lo1E# 
zZAZ5!r~Vli6r0cT1eBV9ZpiN#P`?2I;INrF^J+7yIsP<||H@X`PvKuske=;8~d3da1d2wG(V996| z4OhCZv;+FJlsM2$y}(Ks{S#2PJcbvqTyolz>pnz2v+OhI+y%XnoQ@_+gL+FpWsB`} zSW>8^6k(3MPM$YHUoC?97(qZ&_!Gw4t6o=C4NDJq%8e31JvatH3}^}hF-|rQq0C*# zUG7q<8|5#LI&){xdnaV^0Zf$>!IzEcKz&zl^Ka5pcJjFCW!yK4Q6+TvyGvr*ea)<2 zKrqGv&m5)rkWccDl5CHd*6hUs=NA5MCL}MkbWdMyHvcHU{VEALeOPSCF3TLO-lGTR z^X8~>MY6UiD`h{v$xo?~b2)*=$J^E(sbkAOG;Z#4H)|Ke!T3Q|8oRAC(aR7lwHX}P0FQ>A8LQX=?u zl(~FZ%;~Q8vF|w$IQarf1{&`iRq&AuEeIlZ~wL-Sz>vS2E@5# zxI~u$08!yQ`S}csU#)?^Vdd?7VWN##Fq7fyKC=z5Do)AvzokgRDz5_FD*F%Gr#Q=M zn`OHj{TsI(?i=}{11I=^9?KMIGt`H;yC5>Gyh4}kFT6PZWCjaCYMcS;GR0@Yij4I@XV;$;aCLe8y$ zU%U?1Oo>l^U{dX7-oC zeuKZg7I+=5E+yK2%r}Hk%$TVoRxR9dOQL6?<(t@ZKO`#Z$m(5RL!091f)||wPsb+j zqhYeH2+Lu``8da49tsk-dUpZ9C02aF`9IHo5ZZYlSD+^q@>asf!{co19t~E5m3Q}O zXy3d!o56$auhqt=O|jgYN$tR{IfnVb$KJ2UxU_Q+cL|B*FbH6zuGUg}hGRzV(mLfKi1s+|3lYnr5-qdEt=BHQg$M%> zc`M=ywQp_QO%gT*pJ2IqN4O22{El_%E4tVPlsfoTcqf;q@rg^;UxF!Utn0-A%ZCr;lb?$4x z$fx|mB>b8!8;>(?M{Nob|DxU};hcc@4|+%|)yR))C( zLxl!&dFWgP>Q`V2(3__Ohq2WZ$e|Os-E)i~8(x$RSF-x(S>KT=lzM?Lt^Fn6svtNDV2*xM**V7u_!N;>)l%?lG&Cer>Mjl`QRDXIQ9iTBnZehJ}oAV^=rTBnAD% zsq70nI5J;XgjUsaiqKvVaMP>&G^RE$DQjoY(E7_ewm4JN-_Q}bq+o&qD~@bdHXK)&Jp?J|I#p66)9WJ9=D zq`RWWGZ^KL;cR4Z;Z`oD;je>9aY(uX%d zkBwWKPpvR+m5D6?#EsRO{zy5+`B4gyR2u7Lt*2A!_H?1lTLo;Cz0iQT8M_`cN6a4%a|C`?P$A7H2#J)%^teuqZN6j0JNyfkZ6GoRve!SETQ9Y?+SwCSZ z5&8JBz0%Qk%U>KYz!up3p{Q|lKfR<+IZ+Ep))o8WH@$E*@xyC&aVJsgM!q&vi!$a@ zI-M-ekn1Ck1-4@W+|0Z9!9^Gd*MTv9OEvPy-R+<0_u0II>O~^?H8|yI7_AXafWc$} zbG=thJ(%m42GYcq?0F2^C8d%VAw9h4BDb8*)zn{dm%wKmoUY|(`jVWmX{7^lwGuwL zQ(_P#S+&x88(0_Fb?);D*$fg{unj(y==?#J#wv!++3UnHtPKPeX>DPF)F?EvRhoXY z56*j%iLN`vfLTXO_uej`Wjp!;3$HK>SYv9UX2HUXO=@D5;l@h?Q@3Fpaffgu_;Wl} z7|;U-FV^kR3BUbGpCrP_8A9wSqF{t@-(IBhO9+7VYNaVug8%Lr3C4}r1C*%7KaybqShkBU+f-f2`?x=QJD!^qxjhXi=$Q+U+o(?J3S+kD0ZE`P15L6di zUZ&Y+{j+UpI7q*`;#peZH+6_JnmmVcNx8dOhi0x_3LYjWYg=NLAb)2k32%Sm@ybEc z^hoH1U`aN0>w@)ImfzW}K1{dn*mK|(A`Y3Hr0p%yd` zVC!w;*L3gnO)@02#!dqlbaPa!nwM23-CcZ+`i2MPSArWK#kpr9psJG%VVN}kP-Ev) 
ztkl2>uTFA>j~+wEQn_2C{wBDiMwHHTFK;@a@X*JKX|Vl>*RyhyfsQISFz^QwgP-e` z>YK~YP>Ur)&M^Eu8#rqzK9if+NyN-1*hWy5M1m5tU%8}`jfyr>NDvV$^;E9U~a!j@&*5%E)+|W&aS>#uGDnTtJGPIVB3fLdU(*f0TS30 zKJcz5lmG}#xV^iNHnB}F^qX7Ze@Uz_W>^u1M_V|!qkU8m9*l*ZJ8iW*lFJS;ITJaCU|{gb!f#B zxL^EyWnosOnxJ}c52c5ApoDUlhU(A>@%>LuGk3ID{?Pi-*sWC5S_O{i@`$&>ygBD# ziAC3iIKt)T;4|NwcVNCIq^q7NzaC2UHm){_xN-o2gs7} z2m^WH*_IKUl_q2$hmDP*q>zvKO!9+j&)HUI&J60kf*B}tas?Me$y;_n-xYbDb_q{Fh2lYEX51 zaaL{#!3{C}#I`P>|M9nLme0i~m$jJeEHiHzHgFtOz>_wB)~YVxK|<)g1!=%VT}WbR zyupb2Cz;2gg8mH*s;7cQO!PkXPhp*R+#ab|byyqPA;AJflKCtb9}GArMFRNhjywOM zDPZn3yCQYWV`MN)tq)2<=dXb$?%nm0fGp;oZ9Rj$He&ChO$(ihvPSF`<5i7vurB_U_24Zg0_dIXzKOJ+RG!PB9T!3U%>K ze2sVqu&h02`l2~M5NY--JG*x*ed2DnEJp3*?TJMANba@k1Qc)_e8}Y*SCzq#zn4*WPP_L>&kujyIK({#M!Hevx()gY? z&WK->B>bxW%ZQpe7(XHpiQca<8}2X7+!$f;Mcm331!@6Kj+LzC$R~x-qn|Ru`y`Vj zjguxQxPJ77Rp^Z+i7r!Z2nQmvvKC>of!V>)bUq+;%c1r8eKCmhR=`^Cxv?Q#{mSU;x;D8azEN+80Y^t zyKjH~buqV<@C4p6cK$z7TBB~H#ZnoGylPw}{PVZd(^?MEE=znZ1Jmjk(G}$95!ji1 zTOGKPY^!F;K>~}rQzD364`AAsscAws9WBDh+A^9&y=uCDTTh4^d4U&di|DI9bk$@h z^cYZO!_d*(Ugbm0Z zH@Y_h{Gx;ch-Zk#!=?zlMPvQ5q`!iv#r zeT-i${o}LUI9vEX1nRthXtPRJbRDW0+*Sjq!f`vx{fp2MMSd2$=0W-`BY^{u%-iwl zChX$Tu143@0eAgkVT@4a961tzq8geA-Vr2~%wVPQ3wicL{tDI*9y5Y|+Cj^6Oy`oZ zhg>YxixF%ZS}6mxGWA@+JZZBjDep_wY9DwrX*O7lep?3tU!Q&{$QFPD7@q%Hyj%nk zKi);bhGYScXNGr5zC&mQ6010)nVy}?WL=W#mVU^TU3^_m?xG~vR4)b$sCc6fn(0l5 zT`qQ*hWVezJ&bfs@c!;WeE5VIg{=FPNV=>+zg8ZTgMUprXP_BfKwQGWKeEAkAAkD*bjyJ7~Ed2vYI{P&OI)Nt1?5d3D2baVvtG z8J`%W*r5#RrC)Pru0un8gjTmE2RA# zAc2%W*nZ%K&n=7Uwi`0^ttqWF030GZyFg@tqT?R6!P4aq`y-tKE%lT-W{uS&PFg=@ z)kLDX`wZZ#38=O1bzGsUT}h@^YHndl6k7AxGHAedtM+BEiOR(WaZQY%S>!Tr+vm*o zn+tFpuCZsXu7*4|l-=p_XWGWs)LpRR3Iw>^od_OIJTywrOP7qC3=gq6l1iT+mq7$G zuP^~XN;()?@U5GYr|^GBxtJEn&2&gitny4Qyy}J8=|NdPgx(@S6U6A%llYbHiq7fx z9y}h;l`j%^Wt9cy;`mMl@V2>??{vK2rWKP9A$05m{^mfx!;J=?B#{Ht%5X2rD8~B6FngjjtxT@ZZROhUG-$6(msQhN zuKYzLK5E#*kEb$jwtt1FVYqATI>Mad@=&*7M@B@%`E<8GYeU5hOn*%VFCHJe zW#NSyf^7~(O#NAuK4aZyOm5X{a^+;K7|mhs9dy2j)9D8xkSSTB2WUSbZaAv7t%%Qc 
zKF(eHwj(1IRxKlrzU$+P_;^)6i3c?VCvx&3;pat>#3xZ=!pD*~qmhlB+iIK0HI0DI ztSJ+c_rH^NPe6fzls!MU&B597~uwJB~+f1w46-d zk7wy2*J`ocd@9KAF70cnV#&8XnNHi-y(O74M4C|i2l6+@t4Ati_)~3wK~2QJ>3f3& ze--|bOti%<`Se|OLg}wg0J=3+Av|R31;)u35@tfcuddLdrtxJI$N}n<8olbhs^4?$T0Gfz<+=@EL zz^uvv&2gz!F1@#Sl|0 zRDU@8jPqSDTRwmYkrAHrVBKl$?*PU6c>JNlbE5kve#dlhch`)I8>yE&!tYt`q zwhm2J81IUdbJ3psDZ{rS1Ov^t2~Jr)!7{k(8|40#%`;_)1zpReLA#-5hL8wX*CI8M zElqf-{tO=BG3t|%qmeJuGoaWR>j5=rR>7^#XR>BebJYRQqVQe_lloGrnaLLL#0R79?W@m`Mf4(^BR9e*K6x$kchEeS1QS) z$tcmS!C6vfK>*0caDd!nrPwC{j;I8wUWc8g<@sabWL^b%xa=oIQ7H+nezq1Q?}kVY zqs9I44w>S?+S#v(2F<0$fp(p-@({Y_m^Bxwe<32qO~dn!J5_RTx#Y93*FsU)+FVTe zAb5NSBO9BSvfd695Fg`R<|Uns6JNKX<_w*1Z0?+(NZO3-@gR-H&-Le#c2TvfJ^Rc5Sy{CjmBqA^O`m zx!qjGv8uJ*N@o3}6exQO5Cz2De|Zs+5(?ddDFFR)OD!Q4HfPOqtc@PB<2HJz$edv7 zC4eTc1?^`%fC5BE7<~?Co>C#|Z+;OxkXfY(DfV#v8g$pvxW0;fo~SgAYVpz3FMgObn2|*@8!85{@(B-b}gge z`mQVwNi`cBBE29osimcMmtWJ)7%6#HK<&WgEte@zd?|7LWCsS5I$q%Y2w-8vmFj_$ z&nkZ6DVreYOIA|-Z~}m(Bp1C({~PN6@JU`dri20@yC9O$`xfh7r|8xeb17uBUwb{w z-NsoFeiKtT{JeuEvkkJU#XZWlNtsC)`~_Yg)&STGR$24ML#~2>y7z164{bgYjG{ly zP^mQbd0I9`&BcR%^4x~a`Mq6U1?n+hT>wJi$f;ombYdZqF~rg2FuymjfrGgG`_R&} zVekb=NXlX5hS{NDQuRi#gXt^QW9bkc3Kzt~`&~fg_Rn)~TQ_tWNv{JKZnD)qMU%RR zPr4puY)M;g0Th6Hg7ebrT0{Bu&DQD1)zAp!CRDV=0N+OVaFV6j1pgw!I6*x4%5M>< z#mw-dtx|?eL&O}<&8`|?L7r9DsEOH?bIbdm#A_&EEXB2wZCWY*W$2&0Zgzb?`TjI3?k$?S^`^xf zU+I1JAM=lBqI>zH!_g)7yM4$8 zv3*8VngDhI4M+t&WCXL)nGoUm|8{EzZtG_HqftqKwiGFyoVmK2h229{uiQm6UvW&x zQh@zxGr2~i$_#_CldK+=80i*FNOa~jgI|0&DBeV`sY2oJ?8Vq2b8L0C4ZE3oCG2}g zP3N6Chf*s~*=t?I)jI+L2!2Q^$U;(^dgkIB!L|x%B@n1W@P9wl8D3FIEj68kpQg92 zVt-pFf0afcLhHZF-FtZG!gXFusZq`7{-V})@ctirp2Ek@Y^cxCJY~$r0!O%I`L`tv z?1#fbHB9yI|14Rac1(SLJMP<5oAOA4P+i~9AU8Y-uld)0ROrHoE~`*XKkt;Okv;_* zz$$sansy8{dK~%#KDqo>7a)AiSU(9;ms&}P4o~IeK34ziqZ?KA@h(4;>utZ4Vzgx| z@^vXK?Ah=%s>V<-?f;B`3eWT_HbA)%_;aon4lKFL0H$CJPF8KM&Tjsitbd>Hrt_B9 z4C+P!2$>_6oYalG6$b@S;%UCkE7LZJp_JakwSV|@g0;y>HTUK)C+z`a=e3~ z*~M{nF{den+b)hl7q|dkH~n``)I7yW`d`JW5sVFyf_Ws&OS)8 
zIC>w^`IWGxV8Z0&)jx)ctQ4>cHZa*7NoMDQN9E%NGR=5&1U6Rxv$EPiUvmvVQ> zb&zIk1EM|RGJ}jHo<%<Qa9vl-})5SLq6vd33-UawNBnYpEi)F6SEv)Ea#$i9_k_t9vg#w53lB=oy>ez zs*d{#ZXw?&MAb8Qp}F!x7Sg51zNOu{u_i+N?3_kuuCr4V2^DDz7`z1--JNZtjEmR1 zOMZ(KwDsL@*`r2E`?tU1g|o=YVwNc#(I#1g=T4>-D=+ua7wqG_2UCIetWJK4>`P0hO2UI34tAI52AT+kRn>;rJ|z9GnE8oZ3m)-p)@^-K8)4(d#+PFv;_PQeseNr0GumKg zYm!S}bzerz#3{Wj7{fEW_PWBDA=dqWZt|ozgOgEt9u;AD=fNxj6Nkwz^FQbXDNWA8 z!^`P|!yY7o;V(noBS{_t_qfVzaG9e^L~(}}zU&3=%Y6R@47h3PQyWciyQEXAmbqaN zhdmipCN|x`fI&Wu;E6?rpGxsQJhBz9v;o9cRmaI-d~+N#I$qDE2h#Xd3x5BqBqlBypag+q7(9QP`KbmKk{h-Fc#Pfd|~U_&V3TGbdZhs z>Ja<;U<4LhS9UzDNs{c>FgGC|iV{9eJOnhQ{#DxZZ6K8(RR&SCJD6=UdiQ+(3@J~) zLqXsQlF#W=%cGxMfP!vn5-c!71293`e*uM7IGHN9)#UHv;Ezyw2L`{&bZ-uL=DJQK z+eTr$1ML(@7hGA~_-zRhS~oblJRMOt#@p~7%oFF<92xC_Q_Xb#Q_4h`#z9F}d?K zpnl;n2V{aJnl78;>!fVw?*s3ap0>UB99}fRml()dY~0+!6Ng2FpiK99M|Q4M+@WN@ zH=MxGx$EZ@G=mXxr`e+q61m(wS@>=wqR`sbG9V-{U%fsgXzhmnTJIa6sh1PD6sj&{?m+>eo2R_xfgdB9Zo!uyM=a-b1kb-KSj zq%D^31LuTB^5*Y1#j4K(b9(jPFSsHM@YI!A3*C9kO@KwFzEP-eQ=mEYpOc&E>7{3p@L?pOg}y8 z`2yZMwal(9Ysv(Fj_)7@ac5QMil3(Fqv&9Ovy{9?Ky~FyrD{HE8o4n!b0FopA401N z{C(@+wTDuwGZ#U_snJ`?eaYJ??U>v3a@3)*9n(qe5g*Pkw2M8ypUHB+wBiQS|a;lpot3 z0*r^XaARMytjzI=++vc*GmAPOM)|q>T{e}(AL9kv0Td&wX(G~3Q z+6IXxozPJWSvd#W_jQv97&uh}G~nOa+bldL&U8~=AU6J7=8*bx5^{^@9Jn2G;7uTc z0NZx%mlOU35Pe62X~?WP1@P9tfBkP4xN-N>iIgeH3*EoFo@>zM;KSER+_yyThKqpc z`srV6DLs`{Ta-Gs801(ui@2WEX_68qD9ZwTP(u=K`Z&=pCrGoFS}X=XTWQ}CLK&k8 zN7AWy4J`He@D6cDxveYof0HRn>qAz;SFDjjWhtMaGy%47@Ev?)!<^SmM6X*Iwi!t~ z`tqpvBRUyz8604FN6Rx16+g*W^0YFtSPB`~I?R@-F2Wt{Y++d2pqNQ?-MeZ=|6x%IMFBLrW#cS;*g^RpMAozZd^kjb@@@x zT66#*(USB!H{2378rIZE#I!#QD?9S;9J6!WnCnW zBM#b*EjAMLvnZF(dMQ1ZU#OE@BZV7cDV{@&l@yVVMH!-CZgU0*3BQDE!vi%HQJ}m( zf;YxhwSH40&WWDMTy5=^Ft3~AYFgE_=lvaO-1ZMK^#$fQnL?%ltmQ|rpv)=Sc5TfA z6IDG7Buo$yR#D5ntAjfSVWCxW;(s#H)3)9|q9FxxGQ#WI2WcQ4CBwSMg$FvG1LRolgBL2z{@=}3Vq&^~X9EvPOBUjM zz$O5KFC2wYBgsT=A{j36&^$4Kd~ks;TT#P_zRkgw#FEndOk@;TDLM8gT(^ip>*>nb zTf1Di(H~t5&^v`pyhpZCvfOJZJ`3|WZi8d;dLPzHL+WAsv2XJNrp?*F7T)}W`IL

CE^K@R-Qt+?0uEvqz*yo3?=RXzhLcKTLU^{ktS|9$axXkKPwM-&}hmA+7o6|=&t zYj%Le2>BH@q7rUv&;xJQQne+E92TFhRx}^coCf%FlVioXXSf%}2Xe&3!o~+?r{wAH zFT#e|O;u|w1;<171`bdLU7q30!H&H)MMo-tlXvVcO(zew{nV4Gii83b9rhLAc(f9=Uo zn@@D<*m1=#)SprpB?@(fFTCW4p#PJb?YZlJES(vMaE0adY*3SK`uwpr9ae3HTD^~~ zvixt512A@BsK;Ff@~9a>eGnY703RE6u=HWQZ0lFpEIl%Nu*Vg?+wThvm&gQAf?#aN`Gl3SD2qJosNRg_on^)hhzHZHH9i;$LjY1@ z;v(@J?VS(KP0rdW>dFl~b56SLryBNAiHJM_J)gZ&Z*#&kMk;7u!E~JJ-RS^h-1!@s z^Ds^@Hy`Myc%rFxXslYhT6d3kU`ft}ih(pIYDW`{tYpA>WZCg2PIMd;r;a4kV+nnhFl+3!3LF~6;$qmlh?2z zP?Az&Gtmm<#6EWA)gr46B15Fl5K>$zG8 zCMU(tqT5AVRAn=E4+{Q;TflIa0ft4ldO(Py($^kHb#*=t#VdG&8ay((G~0r-zgJv~kXL^*a)5!(ItOheA{#c( zzBX4NFLRYLE6~=H7l=eYpPGsBf85aNyfqF-$+hc<#V*`ZA%ZH_gY6KLZ*IMLha_el zuS#$Yqhf3d)Zlt{PKX8~F|Q(g5lB0lwHuBVi)QcWniklYuV-2+3ag8}=rxkQ*;}be zNz@O+NTm8c$!=0Tp(<+O&b$LKSxcX1dm+a)I~t5iDlKn8YqBC_gaEe`M1bPsME=Nh zo+1zprJmFcYjCT>5WgzjwWS^)3C8~s8{NWLMb)gBYAB92-lQm=BjB~sWuH#p;!sx& z9_rM&l(f<9x8cs)i_WpmlqVVM=aJ@>2G39G^15Q-6U~Q-)iNJ9t)i|Kt@;&?P|BOx z{Z;Y~#|IdvgK2Y?QPw;A3Ur0Y*oRS=+X&L6=IM zu-BJY%X?Z#u9)9&_vWba6@(>Ou_zqWXNflqkh{6yd*=eciwb+|soYU| zW4Kt|{&$Cg2wT=FVU-tE#qv7V9NC`zF$u-iJOr^-J7be6oVu`+#E2`Vj?el#)Xm7z zz9#_(LYV#jQ@OX6CoWPr{o@D`yw718xrlA2ndd;zwZG^^f&MdY^|@Ebs-Il!Y59Zah~FxrN*y2|RsqD;3<=awTHxpEIx?a`t-d%%A@IrzH8gZ=N3_c~e_mr&+F z!&agrTX5ltnnY6cRtKK|)L71rF}vvQu==3F%ndJ!`HF%moY&)irN$uO0hTw8k4%47(M-UHOFDD zzJ_#|IHWcBhS|tV^{%HGZVbW4W*eusx#}_%Er^6g1Kqt-lc|4+IzB$Fs>k3?Q8}nI z$gEFziHw5J2KX_-!2bu`^wS~qP@^tC%ONZaDu%f{`3M4{rMc^qZAaup-=_9S-J9a>bscuustH~e&rD@mV{$CFSYYnJuVfFJi!5}g` zFiCo6!$53lGu>M)!iyA5i{U*wiLMEbBKNp}7v1Oe+R%k>zuW*^1&Mr&YK>R!inHxS8(qW5CGJ6w zG9dF|Ta(nXD{OYpF&4E7fRS+e9%#{IIy9wlby33pB!OQS+|TONBm)`~K;HC2S>Gm| zelsS$W#p>E2mO}N(fOb6R)6^?QmjaOdHKq3d@4zRt|2(0aSUdSSUoINeF%w z04Eo`e@^4|7^Dx1XrZG?cE=34zi|NivAZ;NTJ#y_}A z(!wpPGFn?-jaJGvznWES%kV>J@e^>S75~J#i!s|cQ{@gki}fI!X>o8CwOOkP3r9e3 zSZ;{v-(dTbg+VuN%8t$HR2D)n5b9pobzR$;(2W+BK9%{++!18hM-t$)7?S#g?~+){ z>G@fK^kr5ki%IRbYWi0K2Akmv$Pg76zLdyH3i!sYcivvL0Q9*|D1zVJ3CEAoRX^o& 
zN@91~kWEc(&6PzYF6n{L{1zMuIIW0+*s0uXhteNe4$QDjq=Y|dZ|{)F~$na3I+!5D{v|1ao(GcwI>-+i1*pbE6w2h4fb*reb=aC?ST6^He}b!IRWbPD}No zB7EStug)Nq!BnVDtI;U2jq6elBBA)aqDF8f6C}wDDB;zlbM9%FP+&ZYrbnuBeft!4_6po(IJ+rS!|nvD)GwgZGwDO@>)YzJa>7$Tj`0E_SlKIRwJkdq>i(j z+T@kI?%izEK4eyKD&5HatC3Kz+K0TYS*`IziHl{zkJ*wA+9xEQQOO|21!N*;o-uYN z^o}5~Ks2*1>rgA7_LnRy@SwO^-}MCi&d&Xwm776ItjknN@Ik45haNIeH8L0WmHkUq zAyA5=$C!8t#(b~cR=U!c`JAG#Ol+jYN_{ae{N889)}#4sPYP(^5@zAj?^Gr-a3d*Csa+a7zvUE0NUv=k{tL7I@o)JT0b17{ev~Fks?crCWg}Prk-ww7EBh|UC zt9DXA$+2XhZvZlQ2K0DSuF$^0BDr=NCvgJ3?aDOvAM?3R=nm~f`kVUU;y-EsD-(Qq zAYqWS1y<)KNZqn$(46I0F8S#NcIoqF{yi>Aa>Bu1o5%TZlnouV#<_F`^BkoW%YXB-Ss_&TO~WjGerEDTu)dGz`LX(i+{DZ7H>( zrAfn7HH_5?u?ZlZeS+5;qckbnUO7R>e^@%OoH5kKmZn&D;`iAzR4}2aLMYvaT??`( zKdCFadDEJbPZ($B1qTezlrB@i0WxXL_8{eKD4sy9nmF4wAB$*^Y6abI=3vOEVhwIl z*WuOik`>f>TO^A|xLCk3P!A||&_Py&1%xULe*y=F`@3D6LRe8uePfeNmx0+VDkGA` znC!>ko~{pwOg^1!o@m-WBFaR!ElKX8-{PBSD1>h7u@;qV@p@}UWKQu zjdP&aXt=XhiGNmbpoheGKO5LO{RNi>Kz_jp=) zaNz;zkT~Xi8oZAl)Ca=iMnpcLWDt`)?E9Gc2QE+RZDLPc_gI!v0J-26%F=sBt#gp_ zH8@=OzfNG_taqVTHsy*1lPcL}i9F{o0Ex9WsI4HCdAJA?>gcGuTobPM3%k>8xxoOo za8M>1f-Ry2qId<7;^j2w_k*JWoOdAki^-uYu~mS~*oBkHbZu<^6i`zY;-J=w=>FIY#rdQ;LcGTGaxE*|M+QRx-sRp%f; zffn4?zY7@4NZMG#-QfnZ1AH0B+{U*kRn~@PY1G+aj?gyyJRU}zN*)@k<$g&44)_-c zJKDgi$W6$Z9`xNsqoznup4&&QX8CZ(XTtm(&DT>xp{xJY8!P#yZ?cSWP?G;(x0br! 
zT8{17`Ko=^o{03=d`I%B&gAg?GMuy1Xo6jZ^P)_#Sb4*k^jEgR%)Oyr>Q%?VqZxfp zyo04vtNP}ykT-VYRk67{z?IYWas77*rD;Q;x}fuByWXZBS)8xRg{J}W4`jqZlBy?2M}GX?_0WYEa|X&b(PiGOXc zCtO2TAqaXcmZpvjiDj9QwpT9R2M!m@>b&hKV-InXp?cwUz&xO*-6|+S6rZmhE^w5BXXT zP@S-k`@$&HXf}JZ^xx0Gx{*JhbT%Dwb2tpQUOk-~ereETyXe@6Ch_uz4E@>DCi@#`MO%pxaOAluP^g4N!Q85mj|=j2YFmg8lgtq@QW;1A zm}nq$WH35_FCTUH*e&rQ@5A6RJr>?bapyOTYB4{A-N2)04wc1+~SzaTVB+&{eroAHLVc~36I{Hd^MNM6h(>DwYp`>4*g+}xOB zfLeim&+EFf7Os+QKOURm^*_JkL6Wwy@X?m`$0p>%lVb9KkYGcrf)+4;y(qhF@!z8W z!Q@HrmbV7}l;*%Zd&_XxBWDnqnU7z({T(HuIS;T`vvacL8%S9*abzzf=>rc+@e5c1 zs1ZCLmTFr<%hRETcCYXp11}?E=swD zj%&eAO2~`Ryt)kFTW1ccQ34WsHYXIUNm2A<^wTGx7QgqioXE%r-F>D+S*;I!!z0zq!aU>?Q;Xk-uA2mV-T0%YM zkIuJ(jo@wB-lk@fveNsUxYs3sd=- zg(wG7e|>lZw1mp`JZMFSOT6<-;?87e8^T1RNs>`UG?<8j&*M~Ie|Cr4>K0iGJXP`q z*bg0_(1GX|KVsL=GAdHaK|@^;qb$T<;a4PJvn8}R8gG~}4wUZfDsk|KQgc;x^G9W! zxr)2{*AH@Sc=c=tq@;Pe6R!*}(*;l#(x;ez2=qkHFJ)nZx}@xN7BJkUB3853Iho)1Ef-a54jk;gSD@Bzd4A@q0n!#!0)$}+WB=z$^;5jbl$+%Z+D*}*B%+= z6O)#~He*iIh4@QP%85t=O`9}xCM0?CvmBe-Q9Id6?U6HcDfQkBzRf)v9IIPk! 
z%0V0~>pt`&y1EOok#Zk~o(4It- z5T^yRGO!|;@cXVM#lsH`R(|kyG1)0N{BFOYHLVo_<@M4;bsneie9irm61(KBYX1ZC zCQS!zNAAzyG^D|=h2CJ*xyQ&!xn^dY7nF&hr6A$^e%?*gopA2?Xo*p%3Cy&$hWI1a+JiCMqK=kzPi$mIeCH?pH?og^Q>>n>lDj~~qcqnB+3%VupGXksu zFE}S2A6$ig?^x#v;N<8&k>lYzymqlWGAU-DZT4STcR-OgT`I_uK1qH44?^Vb!9x2J z*)SG_SQtMT$ZlVKJOaH4##==vQZv?40*w1=;@Qip1N_WpF$@*g;seANaGcEGn^?!9 z9+}HlwRiibq-KWEeKw621%~`~BQ%Ez(kav->=+pXhrf{zL5TdD#vUR&?PP%$o#T=s zr^-G?pzZusr*b%ASadzi6!>6b?qbA<6$)|lKaPc5Ou2d6SPlLd_4clT6UCsZ4F=wx z2f;(OQ6O3V_|}DJV)n(#X`r9^g{{cGaVQn4d!Hgyj!CGnJ0n^@l1-trg8930bL{{l z)0yyRinQVqcwmo)Ov}@PtGcP;{2GO)pHn*~qu)1`V-Dw~% zrBspK?v+p`2gMhADwZYZJWd~eoO3AVA;-`(n9YtTH&KnD;8>v7&%xB{kuEoQ# z<} zLq%*U$$r>1P80C2O0arv{Lz07 zEqlL4pf{&8a_bRMX99oBU4I72y2h@@EXkV}l;^SUP-~5K17SATeja91Ng|-_0}pS* zD*>pbl?YSar}PDIN#kZjbLZsK4_5oKfsK(}3eiJX3@FGgy!xm%?e}XF(Znahe@qE7 zG&b|+50BfRoMN*E{&=oKvZttkPj?ib=v9KWszbtXgXpqM;?Fs;wO*n<#PkyN)S z1u7&HNn!wf)XgyyI44J76w5QJwfZqDXJTw%)LqZC42u(Me}Hd}m6TF_5}cu0Hi6;5|wjV4;-x4j>b>~@~uZAyeQ_RM53&e@^8 zGr+iR1)bfVbv-Zo?bq;R#&2&mda}Z!I zoRoW@4na4C&?)Ju@>xN(rQ${U5aAog+V;C7-D9Ii_}@P06t`4UhoGue^Kz@r9{A*F z1?pqrblBQG#vvtuCtHi*aH|K&l}z(9K%6zM_^f%Uk|q4I=1ExqPe8E0-f|`H1|COz zI_>9Ezx{vf!=F*`Y{2=~m3JgJcmbGt{$2=fz3T703=|bpSYNrQK%)W;R+>pMUZA5K z;0?%VBYKiW$JDi)6Yia6(mWN$?ae`(W_l!Ml8`Xu=C9-lX8yyNLlKAMXUZo76dhI# z#CL5Goa0mvU`1dE?Nh}vAdR*H6tXhQX*-shQ&+vZ53s3=%%r`DYFjtgmf)1L4DyqB zovSNqpV@*C;oQv*jI&qocuJ9snNz!i{j~kZ5B5*%ZIsJj2CII@t!QEN$zWL!4XgkO zzk3-6lx-4lTu-slbw$B+;k&brvFzd=GPcu=GgqXTqOm?tF0!E3cQRPs5zGtyI{g(V zy2njcrk2m+DoBo8D$(%W*KtDuwrokq*Ha*b_7N60vuHLjcM$xHvIdw27~`vU`c-zW z3{+17+R&L@L(yR5awuj3%ApDu77+Gz^4WFZbP^X)e?_VfRib(Gw2;aw@*)|ypzJp}D0)!MiRQfJb%ps#n|K_-iP2Aw$L`BeOazP->4-Pk2s%%` zX%u}Y7lQ;plzI%JC2G=A-TOsaoFY#q;(Cv#z6Bjtmq$UeJj@ukwc;VBE|Cck@tLgV z+&(@Bv^Mkd6-c+NiIsh3ZN3UxOR=%MXe;VBMxkgua%>uTE5ed!Mnsf` zA@jKEY0~J#Og?##m&~wPjTpP=_P`sG#Q}d}_E9N9{4^%ha80U5p=XFJ4wW|nz(kvg zdAD7L*W?sRTBE4H`0 zecz+82{6TQ$VeSnx0%o;3Q1TadM6d#tfUFzMCdP80nxyosur?g<(vFX_Rv;tZ_duz 
z!({Hu2D(I&G#U$*43c@=bYazp$^Tr^XRf5EsP9lxJMS=l26b2(L;m|GDI|lHv7%XSKVT&jS za;ut5Ul~qR?>}epGwp5FwkjYQzxNO`{YX~SZt%Xqz8X9p9ka=6XnU^v*X zj(yN27p<4m0Bs4XTlZ;Dl&_W%G5r z!bLkRg-|Ro-Ixg5&&_KF@B9QgQo95pu&lacFdW>%>1#O*Pv7-DZ?Sc>gvHe?M4*s> zeY9~UOvjj29NSl3tz$sPoB5<}-n<@oU`|_jW<5i23Yh($eu+@arQ-^aXg&LDkO3!>h7abef7I8&~6DZ&_I=E<%4vbQU z|LhKMolRN}G3+4_p5zkI$5YmqZ27>ArS)xE$_1uU@&pALZAK>GCcc9lK)f9{JA<>A zH7+c@5}xp^BCeBl$&PVai4*wSPZQ;ZopHx#gqT5Hkv!?E*A=ZxVQko`r;-S%9YKD; zP71im4T91JN2K`31u2N6rZF|-JA~UGa(RIK^;VV(!_YYIG$FayAh0Dd(Zp{ zgq*7IWWH_nEh<@sNDuGPD+7>wY32z8iAM+;XvNz~o=&BmAIQ}zkl+!x6}yuhy;2*v zdL1p^CXXRB5zd0n)EoM-ZsuHY8WSm+xjKyEfb-2v#UaEyo}KAzSp5vR*Y0TK^Y|u; z?0OGs+^6xiiDr1zeh~iLijXdPf1}bC)zziOJ6R^fXDV_8)*3x|<7HK7>An7`CGE+b z!T6d@HzNvKe+(DwPum44XO1zqcFgqPi8Z+e3E+90fb7Al&!{WImodz@VQ>J3mf@9J zD$95n?!l#|2lp&5gUM-g>rDmNM7*~K^u}VFGgbZaIs5myN*ftxjdMrK@D3eHY(&m4 zOAX<~De%c)tJ8Ze+Rgd}7y{5A+Eq)Ox3~F?ac1byK=eqvJUDC~W2IU7>bCq=NQFkh zTh^Vbc%)($m)c(=R`!5k{jR{1@)m*s5k*)m!+b>!NMm0dDBqJV{fG zkuE}z6#&NfX!i-k*2H^U6g}R$OoFjK^prU{sz<+xZUz=NA|%wg?9=KjL$n}Sw@it< z@VXiOnK7uld-j)u6$X38g;EeVmU@1_H=2oGyLv6OjR%7|AsHpo z?o>zGCeqb=Ukw;=?leBZrg3QnWz{D7@I(r09euDi4BDnqW-tqSI(%;_qmCIu6hk%I z#ALjDbi*J_V!{B#va$+c0G>LPxiW%Y#X@N`?gO=~Au0!x%@QUa9Gbcoi2s{w8FlSH zYklxO{d}Dz;>3h zqUt9ZMz)_5HFX?vbX5 zwD6X|j#v#pvB*6ORnjaJw9M`D3WroQuxI_8pd3+3x*ITUX$7|=cM$6^Iw~MyLE`$j zSBBPzfS5R{2qCPN>pv(Iqy$AuZVUIk>5!a~;-pV2i>#bgIDw0s=`WQk-WeTzA_tx@ zLr6|CN&NNn$IgemG?c6yqeBzXS$A@KlS;&8#$)ByNJa4sO1H?UNKxHJ}id29l9K4vHlgJQNlkGT;ZU@Td1GbtxRrUxyhX*M;EHRmr9Em zBmqgL0`m1e?lu%le<>pCg(Tm@yRTQoTe;MOQ52|?iU4m4ky2C*VoORrNN!DFT_lvA zOj&fru|x90?PvGJIiRP!>6veeGwX)1Q~VF`Z`5Tn72_zds9jzg@-4#GZ{(Rylqjag z=4U^00t9*==42PdVtEDP37j&<28rY~l$1nw!fqEnxy}NzFAhpiw7oIk>PO8RnELq? 
zNbk)y;BV6rq8$t&umNbYET>Oa=`oXzwhb`f5F>C1 zhMRV{q>48~W9;sc*Tj0wmrUN5#cpggiFH_p9Pw2LM`l+7U6ij4U3kv-N-3 z^Th#z|80!FG+{a56oDo7aMepxV*)Mr=hbq zID4QK6AVFZUwLt+i+GJAz)Eluu^=xgD$m@lfCQ2H*+hEtUB zEnr$!75oA#%fncXN*AFwyJj@4~K$3 z>@7_#Q-zCcmuRZ$GT8_BmR{S@)|!W?_8PZs>+KAeGJQ5`2rxTcdT~X|FLQB6yMW{(ON`GW@zcXeyQf3tefLyJ>$tW*10|5iOlTdIS8bT0)adAA)w;=BB znSZCC+g{nqc|CCvLI8(YP9MVVjKseBmg(y1QhKX2 zT@9h|H3R`JUY%o&SfHWM`sYWp4S9Ekk>w@cXg+6SVtAwO=EL)z-B7}-?cD_kxFgx( zdoap^SiYvXslR?&Qd7#cnpb? zv7tZi7-r#~wGSrKQ=u~^%sK6w8&Z@J0z<^4p{}T+ONxJqYj-}5(GWXn3QT!>M#r9< z-pDCFB{(HT1Vt;(SWJkDMD81FDej-VR^Gd7a&ZNfWHxbV{`jR4z@|2`kj%iPQ*7gk zloK>uy4ag=v)`%`@-Bp-(!E@FV=ixavps30TIqwsU}AOe&mik9)DAc)bC|)cIgIpr z9Qb)_&f+!+`c8cb$~Oh;Ei;f3pRHVR6>tVZlVy9LtGA3g_5Ss)f*2mlqPQ$mSWZQ7}?q@MCv+5$zmBpa$jEvLi@lImj=P<)Chl7 zO9XUlQ(Pl#z3#a|_F(zO4bC-{=h%WrqjRPp4*Hkc%J`?=f61TT6R&j`ejT4IJ^AIt zsqN*yynrX6z8|oYMYY8E@6>LQ^mB25ySsurJ$=N}c`n|F;JnkA;PzF*c0Jc9H9CkP zBni3zdbU`w?i-NE7@)C-_`Hpl^xNP0Yhp+_+ovKbPU^OKf~~}j`|%@RSgLz$64N

HRy#2mSjV*m&X6ovb_8tSGL+9hxZMC-BZi!{f+;2_;-LA0EG+fUxJ` z6cSDaoP!5wz@~D3sVk%NI_N|S5*nf-!^K!Fm<~(ksbcSGC-!Z#f-5E4k(}e4nqmnW z6M84T#*@|gIlxNte>>{^L4QE=0_`nuk6AWU^6n z`%YSRuIIktk&Z$r-SBGUmC5byU~COMBSq!tdPP*z^7{V7t*Q#vV58xcr@&@=`qCP% zHX$Yr3U_;?%qFJDzNZ-O0m;!cY;XD6gbCNkgLmT0k(ZWb8)_B*=N{eJRnb3YmeaxcBFAN(F2>L z04DgQ!_WD3;88e7iYLqO=pBtr{a7)#2ADFrtBQN7HjFt!TH1qRM9@-3{@r-%{=}pE zAhIFTZJxOl%noG{c`^;>59Vo3LGkU)cA!)%%sstOqCI4yW-yn32W6w9f-K)_)n0X7$B0zfWA9W zaM$f&cDUWzFR72>Jw{hJVwa)0KaMr;2XQD*PQz6gA?S$$lco=J+D&&GccupPl{WHB z(hLQ!w)<+i248+evLBB9!68^p{xI~)Vlw$&$@iOe%N=rbvAx-`8W84$uCx5vaYP_m ziuBO6sw{=oV5rb>Hu<4atfDdOL-hpkC98vPrIJ=kqHmF#DW5LqMJio&1pWSJxvoBZ zkY9DtP3!?bp~59lffzUiq8mEWxXw}4S-+JPGMn=7eRn0!lJ`Er8}#%;5T7Idge8E~jFh`t}~jjLXGmJ%-X={vg?FRJxru71y({lfhSIChK^Pu&p0!Rf^uznahvIK zGKJNYbX7G-O;!~DJmR_j6pBKY(oGwLou+{T#$RwdwZDC_^Rrp3yW}e-)~Y9jgPn)l?P{T^ssH0H4H|3A1x z-+xhL`I;vXp1GV(k={&pR-@oh$iC>3k>^w@Kivi#szcLbHS8l9onuQe0fQrcCrbuk zBbVSw{trc=s{&}l(@bJvV*|d=KUok-@PX%3)=k@Uo-rsw1Ez1k9q>tRSZxp*BqER#e)~) zY-XwfPt4|Tkf*qGlkH?GMNYLEx-^p^P|>*TBM7jT2fU|T#Tje~T|1))ahm(g4UJ<4 zO7~PTOOq2s3IwF=0C$+%?$?=}PO1X)N~&_SooY_^FKEns5Ob)Pwx|SEnVOKW0=7WP zV5N0!{pXMS#rOppf&}E4qTuWw`1{if!5}5E7;6b8=VHpyM9G*(uqC(7DJ9byq#0*a zV6moDAZK?sNiRbo9{cr@N4dl3NsB-Ypotv;i~}&f9)nD(0Ry{YFN>%gmae?MF3b2@ zjg@{Vj>>N>WJcxo#k#os50)h$l)GDiGp4aihxBL&(|}P&(sOGq_uMQX#-GNbkN=sd zlM6~!iy>v`AVG3Ko`7}h#Fh9+6MAsw)SvVv+dABOI9sQE-a?PyD{&R$&1^^zK%rjiK5a^h-vAr-aa=|_^TPJE zn{pYRcL?ak|-BjwrC9l9difc|- zb%*X0i(Id!8Wmb}X!^LKZU@O-CH$@V6DGjAf^^pxx&lvbrbj0!;N-*(RdIrvXypS? 
z6n}Jzp0&QhF?6%42d2dh_)V@pV%6$S!B|@QG#S$?ix^VJPkCu_C3}t5^{XS=4M9d) zIs*nErNuTktg`3mK#K@16M{Vy9mo+|H3|dfD-&zh{;VC%>R8BPau;+X8wJ=zzL}kl zJ)cJI`~3PeB^$oQ;w*w=Uo3bqgujna+Zk_=8_5Cb4E2$o2|Go{!#0_>c%}H_ph0!F zcaArf+%oMYt*`p7LUQt#xxO7*?249vRh-(=L9?`fXAdbL z0J~}jlU`0y3WX!gVxc};fxQX^r%>jNGDOTclyKGs?AA8FuQ@vIw?-sdo#_wj7Xn#Z z4r@jCm1$~noz5>GklBJORW<~q*@R)(mU~}wb9b4R3X7T)uHoV* zFsUn4L~*Nvl5c7CfcuF;#BIQ}49_sYHU3h*#(7bGBM`kcdp09(W$J1pJt}GQ{>3-e>_I8N#R94U~@<47yr!u8rBiZ6NhEjT=A`hAE(Z`2LQ3G*;(G z67#CpnPAE2t#6>ZryiP$FX$G6X;k-4dgi2^Yk}WLc^X0kvwKARQ98{7=N2(3w9_8| z<)XR>^?@P9Ja+GBZRfz@>qFb*GyoeAKQN(}jX#jy$&7$=7;0{Jtuf62;>W|TSwJ>T z;+Pz0bTkhc@z$WEsLfxEPKJY3869jA=^|&8wE;(kK%=dQN zT|Fg;MRT^}qhgOxS4O8=F}=Fm17!@&>Q{HFvb(O<*p%l10@CUEEkNe|)H{wJ-#g0@ zD2IU7m-LEaT6sa6N9F>EqyPttcn<8b@fR7{ z!r&E>lf1Cvca>Y-s19)&fZvMz;!gU2M690g_1UMeTiIYQLBBoAdFXq(uJX@rca(aG z{FW(&d7$;SrP6m(KcJ5*;kpnKdFQC82?q}7YL=F2B~{44rc*()PWb6om#YXbPq;Fd z03Y_-_2qaFg;#9B)S#jkIaG$(p}qXk>rj+>oB6h^r*NNdLF;g2#qj*OO`BtUR1!~Y zR7-JE3dBo-ZADZD)HeqFgi2{oF<+VX!oY$gz<16XL&UN5qu01mA1C!;&%v9)Hs&YT zfYagmo?rW9f1KJ!#|q#R*6%C0pFQda=Sry$)_Fq*VS5O!rMpa3Tal$#KsV zZXVbATUZy2qqQH{3~iVM1NCaVNcZ;_*^NGI_bd^tI`X=`^?hI5YRXv6{tN`hs6G>P z<*K+P(TaEq(ny!Rq3tb(;mq*dVdGm;!;9_|F!W$_6H_`YUF+_YN4A^M+-YT+c?B-=g0OWbAs%?tvoXGF z?{*n@M{oVzZS!!XKEHIZo->TA4gjpjBRi}WROuuz_n7f=0nc?@jZx$7wx#f$xuHtZ z3ASUhWgjjbST3l3{cOGmR#)RJL62a3UvVZ^Y#-`ece1!rX5d>->y$$ZkDg1Dh^)yl zHect-a5f!fpBB|7m6!Ia7*sgsS0x8;eoBK`WoLKVe>lbK_?9n%sCI`H%_Q~(i6d|` zyw8=yc3MGzS=B$smO$C^#p@Q2yB)JB<4ulq)&Mn}-MAaIT5wnH#5%ou8>|DBUa_Wn z!AF?mI~!*|>35&N8%n6ypYmJ~6Ojw1J2|nN;DkvVH9Nt55#NTxKK41LDM9j!-;EhL z`W-F?l-4|p8Wo*CO6M6t|dR-JV7mmEH-;C5Dfk$WiQkDz{)gc!K zqCVb}F0ob9y@^4pIvu6ThLnO-*3~Z#pkOnbDDY0iaspbz6Od!z#6cJ+y~vVydy9}* zxVh>W-vgZ4l<+*eSQGEF4$}P=2PzILeiGE!!Tf;B2=S^p5=)fq4VwgbrJZ7W)ulJ& zx_l=x>4La5W<`9gfk%DBJGdGyS){HyKAe#k-~4c+ogl_sisdWrfh%iIp&!GNzOddt zs@+}^jH7)i|6LdX#Ggeop5`WIpmHfY>c|ELV(I(T#%@hajWjceJ> zJz^`v^YvmaFKdzBVUgLIs2!5+-5y#kdxHvU&+QGh>c} z_z!MFV*J8fKctNZDj{H-JMY4bTWpX#I0?>`L=n5|Hsjps#q!Hb(`J?aR*oI_U56Wg 
ze2R*(*VIAx<(W9d44Ig~aCPJ@>HHeAyQoeMiQ=u5yxaxwrYb)TD<%)U(#}RAqSB!x zzY&6BTb*KCd?c<++#~M?KAinsup0ONd{&7Mz1-9nA6YksL68fU`N|A$Hro$vuUH1$ z-op1PXwfF)P0#SV1%m2_Jro)KHyQD`wHpMwwXu-~OospKh2gEUY$e^LDpuqHo0haW zCu(1PSe4N7&t}ghm)r0L&Y%mXNyyI>6zTWf@lJk^#;+ zV0C#+_ZdL0p8q06wJ^Oq1p1@FUZ4W6+Or=3qIZCRT<*$)X2-4l4IwN>_04!`&$dS1 zIWC8WwnmAc{`foUixjDNlY&S$2sEcy8)_MzB#=9j-f%}G#7G%;qEpB z-mr(#0^BDUP7V#tb+wqEJ>$vz&TgRnsY7p5mS0UDa|MX0_l0P25%@9+1c9k!g7>H_ z%IZ|gRZJv3+{K>#vopH!Ud26*UNds@go*QzS+xVyj_<=?YQtD2yV@-5tZrD^zF0sn zYAipUc?%^^t};|k^{`!FN4ClI8I>fxvqk>$RH$kIxLVN2>;w82!gCzq!GvHYq#BDh+i;$nNUXzC)fDBCM!>slHD>M#r z{2MauAo%rWc1`Sst?{#9XX+UjCeTujb$E-reo-)F=$`L#C~CkWi$h@|6JWXUJ;i|3 zw0sWs_?UggBc5j53;Wuf3HI&4jZZ_oKG(B91;7z?5#0ogHDSED$;xBI$S9ofhmu0A zXg#3bpGZD6tYGP>=FPo=r314{rfcNhR7khs-!ld1>v+|-LU)+$8Zu;?mwrnT*BF-` z$%MaZJ6Fo&z$3Gd$u=y(@OxwQ+NSW=kATZVz?2r@z|WG&;PY&VIZC$nK?rXgF?`{1%R}SO89Ea!m8gFK0{dd#*JZ6zN01iDGSd=Zs*ow zzPn}6hLizMq8SW%I}3|xUMnr)En-166B=Q@wc_VNGi;;)?;bM! zXv%uPQ(6RFd(E#z$5$^E{g~kqiK<$_qDcBG}XwpeJ06GC`nwNS@ zh92B|>%MH7E$|e6e;@j z^=6R-gc-0eEv~iVCiS6|pv#J%!%zSP$z$0ah(WVf0ICWDv6SL%jwm$Y^kg(la0g;Q z32lbNM6ofp=u4RY(h!}%Y^nZ377GAQvD_h4mQ`#cxPt0h6&m(OA5d~W-{whv0IP{C ztK=bjnR#x1|2sHk*HD~=p902eqO>wrM2w6|r9A~sNpV8aKPjrfHD>DAYoJ6KT)Cz) z74Hig@C>{5yj>S1S?JBvESCMbNWXO88z$>=?qJB_cz!-r5@p0Lq;@2dTeD2c@1m9~ zaxFhh5I`Uw?+=P*|25R=&;hccuDQGYA>Cc#om*+*=I-g4eUo>p*AXGx=tKRMFrd8x zInF#gAv=heyvbLXu6#XB3_Q-aZC*v5u943Jh8N=E<5ciS9wTf235#=DyTZ@W5b~u| zuHDfAuR+XHR(jj$AR~o|wY&~5iB3P+k#EF%v+bT%biOCp;|GwM!ITBM_p%Y5LOai7 z{xNxfgiWeK%0)(~s8o*TQ+noa2&$Tri?G1B(ky8~NExpRy$qMze2=}*qU({$C z+HQ(5ZxG^jpHv)mC}I`kg$Aw+VTQo%%Gh-KOYwlH)3_EmRKqHlZwF71Urh2mQec>e zO4B|(H08Vi-fjQX1%Eo3-I>j`ML#UK9R>~Rnt4wh6hSbVVUdbF)o(|6EHH3a~xi#bx_qGbYKU;!B(_&jlV~K11cn5 zaI_mp&rXOO?ikb?IM7z>1VtHJ>-Ren*L;fzs(s6obJSe$AHllyyt1o|0bf(A!Sktr z3=jxDljMxnCFBbx^f3f$v$prw7B3S|M z;CH?wBY|^u5`mdZ)&}fRbR$1)4p$n1l?Wmbx#+cCm%S)dp$_^<0ijRk-NVdg!kGHM zgv*4loF5>N`+f8&z!|upRKnxV$O*lQQ#ALGpZ+K}G@h_@=v8r`Vm30&a79E?rCCg> 
z^3286?|K@4l+kSVH52$hpgF!GAe`^M_-+eb2bdp`qs%AwS4Lq=MwAB~kxPI26_dy` zQmXeZGP)>7BD&{Yie+rD6)G9uxaw;@6<$E4N++)%4XDa>p=s;9%Vmay@*t=|9z5GP z9ek>J_H|a6JxC-JNfS#?5s&(VN-;N}8Bj2nu#z(>vzZVASGHtwPXu=Mum{ zYQ+38xX>Ro;q0JW!FoNl+3~srg#2owY?oMkmiEU8IQ`R^f;8g%y9kQQY{C1+!zh=# zitd{PMoL+rC+HFqA4VMh!~M)kMg$_BQXY_$3{YuQ{?7c4gkI)N)3?1kD zoe)w+o%?ZG?iO!TvO7}IFNMPAYp*U2groMtJF85`ohCK%eDsWT`jmf1v!(18kTiL> zd$G*wq9K!DiG1S;%1h||btk$d88*$iWaXXZlq-O7vh&;;%W$9-ayawC+OLG{?+T|J z%0%kl5T+@!(wGhqZN5Rg5bP%V@kt;06kh0M(~D*l52B&|HCi%UrZaugv0-fy>vj_@ zEu6R*AB-F6vVi+sTS`0)+DrMdc(cA3TGM-~6Qz)K0+GQXkbY*ehmV@w4!idy3Hrur zfMO%-Z!su7QVy_qo-$TlU^+9*frFt4);WC+HSLXCpzk_XaGKxOE$@^DkYq`-nd!nZ zAQN({Hrr;T4Z|6HBH>w|FfIdN7se#B zT{fPykG*b|@JrOSHV{4PM-D}Xi2c@~b%Yr&SqU$88F5=@j&Uh@I_`r<>~wAKFFeX? zDW@42#j$@xt01~6vGQU6*b7u}#bVWVzH9UOAeynt0a{#T8G9KcYaxRTtwa zWvl8tw%NK?Y7r-|J4*&)!1VG5Ydj{NLLl+6I!apzr~l?(UGWk zpdAJ1kRT%rqas?+`6plS4$p)Rrd)+N4`##p0<&-X|pEG;Bx1VEg!U`y{wIG7oR<9QwOkxjTMf|N# zbr)~AvLkynwgh*h5=>0=ei%2z{C?hNg41t=;S->c;FR-}95T<1xf}27B9vry=yJ z*+`kTAF~ea1?8~CXyi13Z~G3>ImQ~GS6jrf>hq87BXywtwKt@TF3eA%Lr5*5kT-Xw z5T-^Tr;a{YMM!DDn!iB?!05u@hZ4!EX}pNBOZvzJOy`0`UooKew|TB>^lj19X9HGK zri;_MB$rZOe^ym#NvFO`mnZEcEDY&l%8p%N$aO+g2TP>ZR?Isee&557bah@2Dh%He z?Rn6r$;|{{wU|F?sAt`rbf0I+#a`m)NYrMYkCltDTvHK6w9UW~+RFW@t%ZvB&+9NtkBY}xYe;XH(pl06ien?JXpI+0BIq$Ko$u$DTwp5}sr!{v$ET_c#XHt= zsInw5YGel_Bg^U}rFg3N^Y5hXQsg%|syl@#55yJcFx3}&^DVr>tveEQJ_I|7RUP6% z)O7vBaP=0n!z|3oY8I9?2iotvKo$AJ03J2z+;-BI#Yu!%(kAigMG+_KF5w`OWFv-* z!*%-%Rd^1K5m?k(w4K*m2^=jMT13Zn*3g**zZzp8T~{@nr#n^hDpTu#_DPgcN|*o& zC>PL&&XBLL9XDr^F_=VS-#?n~-dMNUxdcbPReZJ56zSw7_Acy8*MW9htRTRXOkuaU zZc}b`0*gVt?iZ#x-XJ#=;0%X6;E1UsNc+UK+mR2!0Iint7HmtY+i-6-!?DU~;836j+Ef>5lZrhE)K@CNtkcNz zX=FXG*D@O927Y0AST-YO)wWfho>14|uN^(W^eFhuUD;2}>CHT;$&XYxj;!~h<`7L# zFuR5NUb030%lNi+(QU%RPthk((km>ak)iYST4;GHA!hQ8p^Z-fgK>6WN{RhRa)>v6eig?rft zPuVdIDu^N`O!(|xfdys=U{6$a_S2xKcZ57MCuc$u!IjTdrydHIQL!;jRn>C$`GJx_mtxW;W#UQ9cbBzVpVL&ioaf5pBZuFTIMwvb@S&a(U2V-;Vs}4dD 
z(OA2jkwUkHqOVy0>4r~sw@*q~10x{PNJs&i1#r8(f0gLO28PhygI7pH>STx_c;L=M zYnCv4rogLOAmSWO=x29M#F|Y)C0Hee3B);7q??*VmzwGsYPZCSNppUlF@ao>UuJ)( zoyHDGWBe(f2-&HFX{9TGve3zwr9GVO#X^>X*Mt2P{tnmM*N)cEVv$k_u|1}nT3d3C z$!Qe;4g?VNwHH=9k&Z{*N%VSDBxV|sB4`+Rz=y7OuHHpqthLiID(;qkSL=NUXsw0A z{|;#_UMa*|fj~4Z3$7=>NUz*Q%zzzB!^&%_r2Z8}bHV(0PUo+axuy>YqRj?0es9_h zRg#$Zp){vnq7w=mY!=S$%{iW3Gi^9Mg1w<>U+Nc9qgi@$p7Cvfx;H}(9Ua70-G;-I zE#3c(vNvtHk?&A5%Lka&x_uVIYVOZ&S(e~;ri26%8U4iby45JeqSQnc%Exns76md( zln$IHCi15?0U;g5W*}rcsmg`SU;cr~dMKG6EV+;9?B)A=cZgP`bB4tmI8+-DYvTMs zVeax*vG5@xj7ma@1M#?_D&E?JPQ@!&C>3gh`Srv+n`VBh3(=S_T3hb?8C-2_#&Ka*b-G)lxVz6Nw8i zM|Ij|O!Ni_G3PsmwaBI23H`3r+YmZmBS%syN+2nBUq7A-=+nc=#MWv2sBnK`YDi$T zCu9*{?b%PgTWL@3Pw(Hm`ZvqTD&ptzrJ|gO-o+yr$NCc%teQ_AiE@^2JfzmxG`4}o zgRy?0%Fipw*4ZdJ8W9-70f%QTLhOWsb0%kETr9U~T64N zowiZaT0UW<9L0GLi|=~^W|)Fo6;uS;Z$RELYII5}uc*PUuL;q;yMa)Fvhx%o)MIw) zpzEf3HPGrn?7>QsuA(Jrdy-J)^v@Oe3TL+$`m*qETuBRG#sE$i$5`v@2T)=bxz+wsGG40`*X~td1ZN*S&y5hlpj#udQ}mFdT7-y-(D>i<)7Sh_S>UU9d=gy zTE$F0x6JJx;j+YUvQq7u)GK5vd@Hl+Vgp4*-8oygauUsfF9s8!RG6{SZq4LCkmzfh zNG)h5YnTzv9utR31LCJR&VmbYt|-htk6ppFJbCEn0a-mM{>D1$R*Mf3b)%-&oj2aK zMU(q`hoJaw)coNr1kgpS+#89|UG0H>*j&1DiC7Q50kN+Cg+q#?6oH#$2dL~`5vgN+ z(ry`^$faK0r^TD>GUK_Pa%@L2uTden^O5&v0lX>MYgi*um0hYjKd+fnu%IUyI_Yo;i}Rx5!5YDUWIC9Iyz{KSz_@6@#(1wLe` z1Btx+m$U*4ujQ{#yiy|NlL|d5u>L#Kl${M@_$Br45`1#uLf{iU4^zZVR~9t0Z5xsT0ltlX~nppwqk87%Z(uKgzOs3)rK^bJ-(jR7z? 
zFl`eh&3`I#hAqw^f$~OkaG<}H-gbNJ0;N!f&FKMWKkIu zU52RUUU?R<6e0e@&U>YkUOQwrT?edrO<8nzm_`F?D-O?Yb`aBlzyK@bZWW58mt}ioQI_izD@k!jVp(lM3 z0xaNL=8|@UFkU2l(V!mkAmWD4!sehF2VlWe-n^M*VhEP+uV%G3y_SP*-|;Gb!J!C| zxRb~pwhtwBRiq{#kx>G*)Syp#JwDFNZJRL=Cg0!<4V@y`BC(%kNd;5bpU9A#^N_rj z!t5)~PheaoO#r^@-hQ0`On`H}YT}((%N2$VC32hEt6Tf@cE2-Nc)7wBNC7e&1sQ#$ ze#nS&;m#-tysBAN=B~)fV(63?0-l6XtWa?6q9mqe{B;b&qpF>Wn^16S_k4$L=8t;= zUB!4QP#Q-M3MDtl+gb`p@ug8fgo384T%*H&tc{atfhBTC%kk%!PCVPszXeV=(QV&j zF|&gEaH~U^<`t1yji+-YOwPz4eV$!UGVcW~8|R=02S9`X3=--6NUB&25-({N$5XuV z5J%?dk?$QsdP8|zZ_5rQ9@Nh0@u<_;2|XiP7>-v-R2}l*VdWfPHHz3Hz+oaUR*eo7GbilS~xnC5z431Mv5N(^@h#HS<(i(!a& zziar*iB3CeA{&)&14;?`{f~ddCg0zDhB#~G^VMwSpngxbLaExkXHwE|fFby69Ip&m zqh5*T3dLR;{f!SiP8TOkHc-Z$3KUhDELCk4V~hM{i8vQ%IT9d;uYgdco!N6Q7l&xh znD}r^$&L%#AY!m*5Jo*cd)8+xsTsk%p&-ZcIcVZVM?`h8zY}X;Tt&f2R$uYg z(r81cmEl6smF8f8pN#EJcgow@ls0-=WM{@at4TYu^-4a|-npWSX7qV)P$!K;TSlho zNKeHu$rJm>I#;URDY&>TIUcG15SV%b*8}GdCt>*w59HLHr+NNhR9 zX7pYKBu;pTif@1AKIVl9yDLtXK!J(oNC&vI!JglmQ{Gsla(pJ|6f>~fSareDq48x; z`dY4aZ0=iM<#4KY@|2q%+NWH!z11uIOn;Jd3+{DbcHmp_#QqD$9*EY_^G~df4K?{% zE#N1}cM6CFxEfCp8+C@9v}Z^KzaSP!N3*+O3fu-uqjv+-4hXAljlO2bC7VsStCYO; z*8ZA32S%#vFU&rrutQ!tiqS2jXhimRw8Ge-6BWuRW;-5lEXY~#txU)%eRt)uJa|iA z;jD1AvzMoZXf@s5DOJYCOr-s$h6^}bRuL=*!gf-;5-T=kUr(h_C{?V)ix%W>C`RzcM@ z>*zLNW2ld>*WElnP0rta$aR@2qFcd!yccsC*oe7eq1fehoD0zm{aw_vFMOD&4m5j5 zGjNz_kS&d;dH^=G+z+YGz3q|>oA`P!+U_s!{G3TNo#OL3F|P5qM3P76w zUT&QZ{v6Ay?C)0i`B(^n>7?!dG`424_9Xp znshi$`t7bH0L1Vd`Q5Q~A2K@gAIbYgswMIcZRptuY-rMX&@WivwXYtj^o}4>alzr_ z;E%|MoZp9EG%-C5>H^(`EG`dTSpKE2BRC@I;on!mi?%v*wsfhIDyk>6d_HTj4VCsM z@{#C2ibmXXMH|rmFEO5XIHWO zH+YJ>NkRCmI3?dx=T#H!@OLu^idMlrC^8sWa8Y);^*0BMbAvzQo$H27Qst#HZ}HcY zb14G}ZQ6&#Y<%Bu{+fqlYCu=Ms70pUL};$ZohM;VPLQ;U%5dwIsg{&PFKjY?-7tEw z7j_X&ADFf|3}u=dT{!di$U~6{XuDjm@{R>jifSt2G@N#yM3rD=xoxa!G<>ga#%o*S zCHa64E67j4C`JFTMUEHWHk=+fP6H+YJwU?0Xu|ODO~P%IGc*GI6A&WZsI+Ocbsda{ zhQ9Vn+;kU36t}3(g`&~AQH#4)b}xyRfJRr?4~d)*zsHUiu#D&ML8xqR%PW3zK7b$RLnXyGJQ&Rf 
zj4gKpDR>R-42+W0@udZOpoMvFc05)Bx?ys_*WYe4YX-;39#~qjVP3^~ zdjNpHoXiIbBfbB7#ZM7{sT>^cg!+!KPoqRNYmr5dPOpfQX7qb>ePbL12zgGDr4lu6 zJ5n@tzP{U)e{}#QNXRVJ_!~3?4*$7BTm*W~w!lr#drpSI`E%hY+?TnF3M0^>#c0Q* zhZGJn6U%Mz31;37<@bWm)aB(WDx5L^@^iK~eFzhZIGljpP^J>}_e+V_>p0R2NG6O= zL#HMEwbJDT+rF1c$vf6^rvjnn=aN2~(})*^hj~H%EwYxG$lm|Pl~e;x1|}5En}X^v zpNK~5ui57%)&qmyYzXa1a&bW^`Jea;dS=$;5s$G9n2c!VWpELYo$eyCsuNrWA)~oo ziCb53t$~KsQAz3fVL(2sQthTEThI%g8BiWFOx;tXvn?NsX#r||yyqf-?vrrU{h zl6d8?m}?)IskdYxyyBK<2%c56Z@d>p0ATU=HDN@*I~gUC1SQ5%@iC`G0_i=F>=={h z*+@&|p(Xia)SJ|nY8?6jY{rZ4lgy~n7qU(7HDPanXj8YtaXyLnqiI?QEf~;x$C*>E zIzh|j-yP|to!4ZBOqi+`)?s^x5_M)Al6XB7dS&)Mj(^8BZIfWdeTDCF3kiWPI@6O+ zl5P8<2eAHYe>X--29SMZE*dKGqwvmn84p)^hezc>Ec(=^2DEF$Fls!NDpzhLorY7> zf1!jc+BDomJ7S>mE9G{5^qkqfL15&?{@T->dMPp@f`7?z=r+ z7{)&V6FR23^+T<4^dtcvQ=5M?z|^4^Or*?9tcq;;O|K&nXzR|dVTMkBuyDIv2x901XEScegU*MHI^|Pb0+gmGHo&DmG@ zN@IE)_pZd@=N4d+xVn$7=bw^xY*==wVLYi-UVdn|jqB9`)m|Dud^dF5KTt(FT+X(_ znycLD6#>DfGCTB#^__-9s2XK`K5I1zksxV}dtm36*7bY&0fWy=bf1h|jDpz_J@*uH z(jAlw?g6drx3n_4E+*59gdX@8^XcdBbO#%67uP4Pa=!0?@maB!A{V>D`HqYu)$<1c znT2AcG1b)V7GlHUm}{UG#`SILZmO1vTTI1Nbs0GHdK}kr?SDpDh8mB2-IFscNh(AM zDK896xtIiUpH>SHfc+ja(-d{TYSI_&czL%-y1RZM_yE5MRJo1cjEWl`ePWVR6uJDv zi+nT~svsX>{@e-t$uISBmGZJJJ-Iz`FogicENOFyj|9(+%K$^xZaGnUFmfHI7cmqJ z;R>S*I$F;Ir&YCOtfoz?mWyhFvH&J}j)@nor=$#N%RW?rUZ#KU&{MMh@qdJAot&R; zALjT_Hj{#xKzFvp-={twe_j+suW3<9EoH(rm8}pq>&21GS{L1Oz&_G3Oqsk>KV6?p z?xCQ${}o^J*p{ ztJ8rrSPL7m^lz}P3y#s>LA8F0jk4DA2b?lwgvoG*UzF5H(YLYi4G-E12z#_6y~wLc zY0~fFHPKq)Caj3t5l;P&G!}}&p!~rY6m`{HlP5}xi^dE0P=psVi)q~-7ps%M?x|lC zXk~e+)bk^Y*-e7Ka==_Voi~igv)p$m&p52j=l$^ssIU~8{tThScBBt`6LpEJYkJ{K zULhFEJnYofgIsW4cyXm--s^=DN!W)u8Z}!~gq!F7byL%2(h;NGcs!%Gg$6O;we$ z@Gp{AW|;Y<;m@Dp3|2XO>_Bpb z#T}Sq-PE*=?rbVOy+P+&k}SLj zXv~vfpdE87!~-PFzOs8=kWdArh%cz+V8=FoEIP=6sVM^Xyg^;| z&Jn?X!ks>eWsJ2b93Txr9EhAh2)fVTxx!>)IUdp$O1xj7ig<+At>yU&-oD>#Q|I<~ z=)DpEdEGCZ?)LlHt=&bt-4i;Yr*gfe9gL|&00Xi6<}FxYF3(CAJNpAZI}yoLX*?Vm zP}x#gc|K(tYEyawO-V`GwS_brpp&d-D)5e|g5a8Lz|<)Nb(Um_Z{$qxHBvvy#0v49 
zV?gb|?$$7?n{N$8R7)Dw&);I!NtKX~6;@WCMvM<@(Pv8U%SX4&UdMgt->U+0sC)=w zq*>R5Y#&|$BeNXJp+q@Kve#Lr5#>LtZ320f6@V%RY7)R7qV_8G1vQ~~EYk@2*Iu_+ z**9whIq7o6E2E2_nXB~=8GO70Lds}K+MBY_DlL4oUK{2F>x;W(A7>&j8s_e2eFn3TQdG$HFSVsNyS(YB z3SzUoVfZ`zJ66F+wKMxstfo|@;_cS)MkuN*w@Z|5j+fw6k|FD|E{*%6q_Y{bQ0z0U zwM_qwco1r`S#rb;?oIy$2!Ue3U@SNLT}Z4Sl_@Z{ zuheXu4P%=-ojD_7uI)p;v)TsSjxdDsIqKZ+#fzC!O&QLTj_86Ko(P zVn+3sfqFy9Re5?NQL%a@Lvq&c3K#Q`HBflg%v*B~8D6$eTT(xv3{#T{U|0AdSlG5p zbSh>u0q^OEV|g5outE{fRFs+J(>4xP2RLtwc#7&NBU$hOk%tCHt5A)rFeism;#r%> zEd*+8Tp{v2>iuHM@!*flG{2*{ILhYs-eEL(hOB@RCbQYBh_xJVy~>}?Rv1nGJ;}WO z9nlX|eBzW`SQqTuTkT9w>-$%N)Y1q)Qf_(BGlxuiK8cCq-pA&~PBY@!>P~zr@;ba4 z2E_09>O8BGin$`wh5`-XYUMN;%uv&8etr*H%*})&t>rbc_TjJ}Zr-p9lq(=|Xq3AA z**p%fQfM2pNFD#GX=foV-^AD6^Q^F)#mBu3^{K~LH&%eBG2GpzC#jqI$omXMu8-Wpy;JvW#+j%9Re8&a#S9@csigl#HcMn zn|ZU2<+%hpC_e1 zRiY{mdYhgle~Q(Ed0kqJaNSXaP$VlNH3jXpr`+peDMg}D;i46(AzMv)j^oCDY~X6QFKIjIYyB=Pdr9vIe9vG>+Yl2FvQZzfY!GN z^S@ruC~R|zT~5n~(IC~;Z-qho-VhK^qBpMzpj)oMBZpM-y4D^Ly_Ff`Ez_JGOLGHH zOWLe-@wui7>e(g%C#7AUDpaV0RuHb!@+?{K`8_y7zVS z_A#E$x*=VD$OmRca2ki~H8{D2DvukH4=Sx7XE!HY(3jrA_gDuWffnWXTE3nC#71>f5{DI%{r+@0I-kx{ z4|GL@!o?qWL}*w@+!d4|5k`ko<(^eq$ z+^SEo6KsEBllPKd3mjj%5_Wrw#t?K|eqtw{$yMcO`Aa~yek$bXrqaQC#?JFQ>fmC( zbswC^Ml(H*6<+3bSgTXDKJuJH1xcYU?U5c2#o1>Hrtz(+U_kTGyk}9jo58v4Ni?Ks z$Ig@mmI;rNNwsKLad}*=#a2Yt#AWfTBgQJ-Ef>(>_Hk~>wb17~ftPSjw!0*P>OGH? 
zLk6=|LH$3&8Fq6dq@qzRX0!&PCmJOfQ*|I zHqEjis}jyGhNol~hz||FJWJ(;B!vLtY<`Pj0XHIN zENJ#r4fSXnUYmn0vl$cr`&>5)ZF0ZaP*(UhAnxPwKBm%LeiaKt_rtb>Y!6!biDmqP_OFa@< zD6`oh7FSs%c9M3W4eW(w6a$HnAa9c)hqo~|?3b^?i}|y%*bh!rOv}@PJAdz7vv5M2 z$X;!tM~fJtxfDhEGZQ(z3u<7s!TvbNjWb|+LZC2CvIcYwKPLYx%DmA%DkkbghG#<$ zCjFYLoglF&c?I`LLIyvfq6q{naO1&|s&eK0tAbX;#i++huC^|jtFE4Qw<{w3h*=19 z!BA?&-VD%mpcqU-v_=%aB!b}?eSXkc@>T!w*DM5Y6|(oPU57F> zDE5fkkmz2U4?i|=p^6w?J~_QTF9b{0KVg79ACf2+nrZiz;yn&_z(|7Ni6_BCK^WUP zp6ZGt(`|ZyHsLs~UTbTt1vII8y}C*UKJxxN5+o3d|AhxJIz&uKe>^Y;LJ5<^fkb=w zC7zH`YzR>eX|XRiUyG7d!B-?d)9y@yZPJy!OD~kZ41wf% zPe#A{{X_mr8?ljb4x=Q9eT1>Qvz<@PXn{J9z|T!npfxiTZGr1es?oR|NI7PoU%aU3 z>vvosA)o>}xkk$i`Y34{A88EtK~>}x?ruyRf>;W-5eT?|=o-Y@6*dy{Uq zX~(`KqCa+N%A-n0^l$-&emJ_i5g(Bn6zX|b)7CgC`(!tEnSwN&H}xqobMX>ho5fW+ zm)BngN^Y1-NJ7IY)9unt?~LSYaVqY?4>5ah&gLSTscG7X>X$_^Zp-oe z|0Xi$7ZYtMiHbd`oKBy-avXI$HIcp(UUA1QzC2H9QA2~;TSibYBQ83Zi&|EH3$YlDgd=W8%hFr@7m@rqLHNfTMv|YWh464 zld8wqr#g90`W9CR^k!EO;Y%R1xd$$VKcw`(f=b55uAnTm(hmC1l-u&$^Gn_F3^l1b zV~`#@PzSqvA94`?1|)xN{2_=e1BVRppgu0zG~ zN@tPM`Je_-s9ow|dYWoZSnJIbgU3(=^ucjGByCm2({QN zxbv?R(B|0Y6xsaLi3`Wra2{%cpD6A{&5(Cp+(`LgFY~L_F;;Dj^&!$bX~xn~!ADTZ zNfeP7q#Cz=OclyOoiGe=XljiQenev|9M%=Yunk5kX{Lv@Vnt>Jc-}pS{HxaiKin$$ z-OHy3H~kIsepLkE_61+L1@B`!J#|3jbjE3KCTV84Pj_D|nEr~RRka>jXB4L0y zP!rQz74QT)fzrAxFHx61!P-Df0)lJzNC(v<>obI$%jRy^J7$0D1Y)HwihY~W8aeGN{5UJRX50=}}N{30%c z`7x<>%7Up>!Su%P>q1VrzjTP-DcJ&c+Y4{WEC%a&`CNT2dx{WnVugnG_t*L^?B(m} zWHtvhbnzlUdxXl62-ZTJ&hkVK+6ZpKGmjAV1LbU`q7w{Ky&YM-h&Jax=8vpxm0g+f?nU{CIr| z`d5FRBL5V=9sTcC_DgCAZT`AxgbuBmZly<7++)>}zN)gi1R-6h&GhY(D9$#}h)1u| z-PbrSp>{+d-Ff)V&>`AXq1F|0ACXkme+)1s*UOmXKeNV5p_P5R<9Av!a9yW*SVRf5 zZ8^gOXhO{_LgjQy{;AT$CNlOj&^`E=@EX$8z|ul#C6^!A9krSQmXv{jzZer5#w}|b z=ePGh#0evwxlzPnE7TxN_$QR$&&Q>IYJ=DnKRi*m@Vi-lG+9(n+Gt)WG9vc67}B~t_B zg%qaa7i2*W(2yut-hSq!zo~8irupi~xaDXy{3$M_;VY~ zP^^V z)_WO8-t3R*lzjYl*0xT~*mtH$>NW;}4lx{W(5m7zw6qGd9zPj8nN6Kp6)tnjb?rnT zvoD?9u!~B=vquLq0jRp+>5V>f8HR+E{K7_@3arXl+l&WK0AWQQ@3KtCtiE}(tjnx$ 
z%xnonjhbLriByNZT%#z}AB$056;!HMwg_6(C(WQQSG0WC4jr^;-Jny;iL}$r-28=f zuZf)utqiUHGA!uT`Mr@Mryvh3SUR>LN9K^&sm zt8UeXBR#dnr_-aPxuaPenctj4lZgmO&NeIX&wBImckPM7Sn`uc!u6<=pT|5l;sfpFIS&K1!YA!bS_#h~Q$V;Avle(L z*y0YE$^P`ii9qCYZl$xqV)#(*;Czu@1L=z1kDqYtr0O*KEj+yQpN|F{GUWCfc{UMG zM#0N*d8)H=rG-V)MZp~w8GTsA@&=7!g)G%FqMNx#Zj^A|iC`FXYv{Sa!$)c<^#4pw zGsQi4@=AJDrp7Wb!{L<&WPP?d{~Q|rO5blYh5^v}A~Dtn>N5ZJ@RbjPDi}eLRIZO# zMeL1W1iTuPebN~YwsP&O7)t5VxBb19@_B*qUs7v5g=xvVeJ6?GMK#z>PPb!{w{%-YhF`J*ZYMA|?e3O3&hkzkW% z!P?VCC*f}fDpKSLhs^v`zm}pvZH?+BovOkk4fFQDoH`JD8(tpz;dm;Gwwfxc;e2W9-8r+Is?m5p;Qz{b%)diB+xeD zrlUsV(yk3KB4;7qC$-j}|RvHw9lrK8^)z?37W;p%%_Cr_#19T+kwwX!*sqS%*7XssiSP9$0iw{xw_fX~^*X~0&3?a9NnAXml!>tYJ1 z4D4)HX5+unPU01u9X-^jK?l>cGROIT$-7QrjwHaizbddw(;L{8;v@y2J;|d$_b^ey z?uW~sGY9B!#=9wiGuW>BADF%wvuj(VqT&FH+AnGV)m;nCcf^#4Fc)>Xb6l;jHqyU`~B6 zhb#tE!~2!onYt;xiR|e5yB9DneiXs)Fe{$TMk#L(#f?ToqWyorCTcd7<~~Uc-RS-N=Yh$ulMAS%GZZdl4JhmS8I4m^SCNyQ?HSkSJ^%_n{{rSR zlm)z)V%w+o-gv0kB10ie4QairOv#WqyWOA@!Hy$(H77f-h-}Y|xTePLQXzbpWLRQ> zD(Z%^Hz|nfDA0XLqVBK!_z)%?d6ITWm0&yXW8EH^j}=hrEmI%OXNLj2anNUJ-rB7X z_a%=iXJ5=IFk{)xuEGq;F$oeI-_ji7)dgI0Bd3Jbt>rP}Bpg4_Z7ecD{@wSkgeRUX z5P0>VXltO|7pU`TPC5;`vGsZj+QG*$R=U>XAXwcdJ~q)LZQ=E(4^n0GRa_ltwMXwo z5(S#$xFB2QS$i0o#zk_AR^O_DMHp3|W^cmGsdj}y%?8R_fq8cI!UbN=#W4slQOB#l z>?|TA>qsA! zfznxY6(<@Kp8yXmDLo=ID_Z&!L^h5H!L{C7;|4T@-?jlULy9S()CCt8F~W*He+Bk% z#P&(C%3HzcweedhzYK~r7`)35*WY0<#&G+fsmX$g!TGsXsu=S!bmuTD;c8Q)5RwHi zWgbacQLOt1Zt#=*!*ifbj|2vzZfZyiff|+@jJjr|Id%n_sd^Wp+y3ig)|}raIAJ7i z@Su)qLcxBRSkhR$+$f?2oNFe_JVv&}z;HP<%rXGMmhdW>F)ogfj$zRVunV9-{!_mB z&@6L~`4aBAxVY*a1cZ?S?(Wh`YGgq>6E5Wb>yl_fHm$y8Tf;lLyMUgtrkHGJ)R2i+B|4#khsF&u;uOk0Pe(uUK8z}}?c48~`Vv=!wKrh{)P|3UuFxvu065kL5w&;;o2UNRCZH1Q~imx5XV( zQvt*9ee6OFHy^Oan#C!W_a~LVkvypi-eXBH<8>Fo6X#g)t17c=Vp;AiO=-TWQmouf#vo_xynq_#epG|aR1l+#H=!l& ztkx&m1ObJJr?s_brvLp0yyl&yba$`2c3{8Z<;U>$9j&xhTmv90ugLBPo#fCa%S%w? 
z$o4{DMr`#e6%^WmB9eolp1cwSh(%n}rl8enxC4a4Un-8&clt3!&jl0Npp+7hz-g(G zby713>7LWi_W~kDlOhZbN6BgnMCPQ>#KYhQ{Ytkq`WJPgt+7IdJ|OO=Vb-P%5?A`4 zyv`&sdlE&P;0KC+1LbwmhV-fC_gJyFDOfDI>|v%3a&~-oBfHx3R||g9)gDia`o>h1 zxpr(&`AYBQPkbRcE|yRXVo@v_c>ssFaa&~=(&-C-us?^W|5d||D`4S3w-E~HO+#CE zj#i4R(<~6{s!7qna5mN?-k5aOvF;U(Pss&{OGOmXdZY?-32Ua`L+h*l>+mi^AXled zo!`vh=xXkEA}3Rrg$xbH2u?Mn0>+rC$ay!3YpfEdd;Ge$_xBkw3$+tlK(N`xP?y+@5u!?@IkFYXkBY}995%CnTUsJu^a;60EI;4Dym zKlqwo&GmQwKGp56x&QmKKykHwFg0#RsZ)gG82V)I08vrdn+ymt}@ok`EWCdvHQ6Pox#l3eJ zQ*Ok%50o!D`J}Y6%f&^!XuWwJfk{KYTp1V{Ycb>_`a#(u?Ylt>Xh0T8fdk$GG4oFO zPu~NZRP-iV%-M+Ec5rIt5h~?YJ40>aw7Yd@mUCJKm^>?ANwucTPk;SH%n1(kk$Fzu z7k0QH3=5Km<#qrO5*EV{Hkv5ujp5+`%7}VazC~_E%eA_W3PFk+?U@}u5G%#f4cNmFIKD- zzn+)Fn?-vfvPmLII$>NClQ8O7zN{1@<$*F>2m|Cfl_}ck?j@$io|~#iigJo@!E{OX zctU+4>e(kA@M1eEw|C0v@9uKOOqBi;7vJP9I(c59slbhrcTJb%xFX;@mVw*Q{OKY4{#-RE2BkZ zES3#iUZJt|6p-a4*But>{3-3^qiawdMqT{xJCIi3e%0r@(TQH`HBIpJf`e{P zErg42oG+Xz>pY0+{m(;0B&p3hTF54Q^&ujUYJcVOyK-tmuE z00H<^2avj-5Bh&_jj@%oIw&01+g=neFlU_@?&tKJwxj1JWy{OsM^VzUDuf$H1J&r} zP#hc0B;s>c(D#`A`IADhqy`{f@&tp?w*bb0!<6{Wi$|rYIx+vPCd3eVyBuSkg^4_jFwsMR?d2!=`} z`;%BD;Ot`}ztHRp&qv^fe-;`KsmYQ1;+4>GU?ncaCVAo7K&7q>T&K%-5v4lsmhjlp zdR=-32~Qqk1T2s*<9^swN*(%YO8pltqu&$FsiV}LPD{c1HH?)mt%=0$-Zx)!Z7k{{ zv4=H>=Juw%cvWGeXcOtPMb4p@=`KdrQGFh_^hXG}2OWEwjaEA|*rqJj+5K?c6O|g# zxT@P%+6RdDV_!>gt9(zNtfXLq+`*hGI;d5O3o}FqTl_e=xdkKC&P#073JiUWP&ad#y{19Exiy- zY6mf5_@WAsa47|@cml9cgO`N%5>whsI*o5&ifWS%Q%}AwWL$;pFL76He ztWcQk92q^<7Mj3|IRK;OTp|o73!69IbNGsTjB}@+71I7Cy2fEDwr00&H1}NP-M7RX zK22Z28iXAOp$^lc%9H(@6Wh{1+;6+)==a&B&Bp$H) zHdB7pWZ@2JlAAXo`s9UU4bd^b!I+9L0%-;CJUazO#D||U(_dZrrm|mTF#x|MLeazN zLK;iPxZgW*P^^nMDQq=j960j%Y_uaKCmEGY&(m8;q;%}6<_5BB9_CJ4(J|s`OtgmP zE(K4;h=cs9R{ZrVp3zjIBP&5GT2}afUl<&weX^@pH>$4yUk8NtCgD6b&9jRNA$g~? 
z{V-YL$t|Ll(tx?!x+TA_Vl8(;(mrW6feA3)O-XVW1Cfu&bm5s|gFf+Vfyhn1p|y{f z#Y8^eZ2VBegcTOfS%Wa^1JnsV6vtVU`!L5+1;sPcgQlyWv;dKx4qvOHsV0WEHKks$ zWue25x{fHrx+@bhQW~2tb7k@ay=PDLJi8Ku?0~Js5~|3io;wXYEIGo77lCaOIDr zdNXjSx@xwnJgt9B99LoQbhG25ojr%{2wZCH>Vxb38rBfet4>l-<0djNA4__ae7Y)l zx!O^dN_SmEz|Z2^70*5qPnmfe;wph34h%1Ag08zegZ^SyZc0}IFQQPOD*WV_UA7~N z&&+S6rjIXD1#!4ThSYSU1m!Q;-FXx`r#9y_v^RYj``w{(vgYOp(5{lOmC^~Aec#~B zn_~6m*1l`K3xTVidYYf>ac(vy&I%gam9G56QD(N-0&63pRj%;CN-;wWt z(cXwE=E@eu@tw+q4#9$4TkO5xK6y8Y#}`kba~Sd6bNe!CmHC6Gnx|D6 zcYzet-)^W};P{^G7U4vKM&v*^fAzem)@}4=8sJ{N)M~GG?mFxRcpkeR1}o8eN*wm2 zOf5YaOA2RXtwnTgL>ac%fI6u)U8B3{O*MzGKoL^`3@MA?eyk!T-q^2gG{bl&%b7yM z{LsOl7=cjgi?ka&yu5iJj@8>`c~ZRdpSJ(>i|Ys1eKHBX?IUk-lPl&SmNl+x1|cSy z<-59~vC3AJWGYrMZ*}sK(q#{{H@uEXNU*~N_gbzYlt5E!3`>4R2o)Uj)DBiwx4i&U zj=XI2K7X;3nq|qce3+T+D{7WL_fXPP2yuvTPZ#%7;|LK*OV3pr9mX|LCLvf}vMiV>%rEYi+idc(*(G zb^62g-K-eFK6bobVWVSTY5$yVk2PD)x?lv{2G4g{@X77e4qcGS^7|6G4PsR$X2m&0 zy-y{`!H~sQOPQV4+DJa04uBdj*dmP#`Q^<58^npbCJQDmSgdX*D3zR8EmzZly%}h* ze=K(RQn{yqRr)aA)8|LCpIiFesjx7rA|3m8bc>`>md%uWl{O^vh;XxL5&cN&}bI?4v&`HZ?x>LKGCklU;-`RZ01?Ft;RZhV8$GJBuol8 z^CUrZPk<#hfw+$Re89_Ud)u@A(Qs<45)M=U2xPYFq(oMOo4&Cr)UZWm%$l^lazhM> zic;xY1M!;b@ePZhTQ;!%=pJYL#HP&}1>4nU8Hmz&>Y(M%w!zVc$rfB@DaQ91#;q)F z0Bhm@r1`GW%qPcQtJli<*3@;s9nUaQ!Ixe-hmf!�|}gsB!c;nU!A@st6w8W?A@E>~?L`-WEt; ze3%q_PYr)e5xTn$wO_OUz0uuYaE+mXrJDX=1rs}@;6YV`uaaa~)Dq^63oi%_xVS`; zrtb^ZkQSW0|4N&+oyc;dD$%t&H+#k|mSAO@(C>3b!g)$3E$kTZ3)k<(Y$FCF39@8C zz`0cJJ^8UT|9%`f5lpx-9V*hIK!CpO(>)5#7$K*nSWr}*K>M z9hoR*XhgC5yJqr?u<>=@q2T^vZ3cr5?`7B`;e#t%4}@$sd}uTcVesi?(#;O~w*tRf z8wnBk*x3e6dw^w;RfOFO+FJ8iS|iXyw{e zpo#sl}k;r z)8&zrhVdaReeS`UrL$+CK~h{&Kw1U^M=S1P-HmxM<;qxXS#rW}3NsvH)fLKU+mZVt zuqMlDvG|w5N7XQO@u>bCjYH$DQ6t)Uc}rwJA=x*z;Pds*tvK4H8gT5ltof7fl`vA` zqZa;67Mec;u$(6$AHZKnX_BS7A&(c91q71<$}NP&C~&TPad=}jq~>2T8b$HX5uXL- z?N!AsM}YD5mVEuiks!}HF?3{6ZF_->^(wI>veIf8T+9J*6C|mkTCj|kB2E0cXH)h`tM+opX5koe}ZR{ zCnj@+ol80IW}s+e#U9yI$xuvn+F)>~>$D5r23~ivX7YL*utp6q}DM_f(ifjJWU>Ifgn1vGc0lH!Wzu0_ap3 
zwa+c=rq7G9JfhgNKq*3nSaFIUbou7)=bc6shiIKb&VpXM@_IWov;oPiMrcqhjnB=a zb9ph(>!?+)!BRjKaxVi(_e)d(pT3*MpHnZaWPh;h&{Pwl5z%Y@dIDP%&LcKw<5gt; z2Jha#$dn>md9MlHFw9TZ3CFvjSe}s*HR7*e2!S>!3>Kpg{GB;jTd0f53Hs_l?fAio z8Q|8y?PCp{>4fOc^`mHTie9)>m+xer)N=#VzM5lFukic(4KKhH#GBsBD4g@wPgeGSz#F}$VF@blQ)3><6jDu>@_82Ta`)p!D8gCK0dq%6X8 z9F-(`3jQfLQQ(pGI6o&akA6J(hXEr*+(Y+k90vS_L@Y@XgYnx)2cKBF)@!aLVyRrx zZ<_%1qD*O59Fvf2-dJ=HoWmpDLo4MzXCG?Hd_H;}QYg2nIrL2;N@xVE#QP^T*ppp6k(CWH_}$pe@iD`BEnIK+kQXiE`9SV^BKq$ z;wG^^>Z*Hh(RaTubZPtM;wkDWJB8oV8Ntno5Wx+esg@+Gv=JdCWYW^vUUi2MR8^+E zp_~fA6-%#C&RqjV6NwCY!(xm7#i60%Y}yR>Bv8fi9+q#?`-zi|v`jMA*k|a!U~ZeR zinu=hE>s1+q{4N!o>mmse9j65Xq*qfpppR zDwb`{Cvn{s*-}e_YmU9s(b8EP(X?&y#HM6-Lvg-v-={QZ#%T{g{3zDeB0>bYr4bV9 z=SHJUf4N;I88s55NkxIlw|T@0=~c&7ExGG%&hqD^rFVQ;B{zff5A>p1d_SOoV$6-+ ztiP%k)9X(?@Zj2mC4SSuDh2j5Wg`*$!ZuA%R9fIUJFq9~>My_39P5PGpPS^}?@58u zS+FstFjsthfk+>a2L>GzfiVG(h&DJm%OoJU6((spka|Q;{M*08*b(~UR3^qmN@A!> z?nW1{H^ZZxFx(&r7;Gl*q4I8Bg^X2o71otV zf3>sgQ9HWiTD~<$=_CW zv*~X1R*vs5et393skN$i{b-mGKpLK4U$9W;?Q*DRnX=e&lswdw=BCcENdz|4`U%LO z*0;Gez-gC#+KB+qymII)hp_yr45aZgf$1E-X!-NnYWc7d~f%Z!V$0m zjnJvW3uNnkvkqMw{^^8i0AOI`*{3>5SDdV^!ks}K{al@c2tgsLo zn`VNgS3aTpbZ;f}d-e;`Rd}gzhO7`m33;d*AUd6r`wACv@UHnSX_bSkdKyhl1CM~% z5nbxrnM(Wq__4$68xI3PWd_Gy#IcWqC>c2zMP6WLL)k|UvwD$iZ2&_+yuTX|m$Ldz zgSGJ_pbLik!!*j~x7Q1BECu|Pm!u1c%M3JEQT(koRUw6LuIp%L`!EkfX0~SC+h2Ex zM96Bk-yIR-Vq-LbeYEG)yjR-xka&*oaUPNsFOnEAX9EhTLpX1ZAOL0CZ#du^md1$K zGGN=;abxHkSDJe`W@|1~h+MD3Cy1ltY3sdoSA?%yPz1HNYdqZgqp1Ubc{V*6$sWzl zW2s8&M6$HD3~q=U`i`&8qx4WBm`@2Fy(n>B)33B*-AU82x(X$D-gRs9{KQaILzuT6 z>>;u6NG&`R^+;`&1w2zW&Fq1|qnwi9SlrlBKuiyo&h#6=^N^lg(6Cf@n&2EOa_9}@ zHt{rWK6R6o=lRbBM-8RSL}bbc|DfH8E3IBy1dH=@DDwxmwBvglLsg=pLV6V58efQ0 zXgBMoAb^+Co3|sHsrDrFD9}?u>eR*3Zm40Do*O{|!o#PK**-#&7O`%G?}pzQb;~as zscDjUj;9SrLYcMgk7l|9+mTM<_A?H(mcrU0Dk$?HBtQ1pzoRgN7b`qjH25GT4%HZ4 zQV%K$Hsj_bHHFcIj|=%!-fcHfvv$7go5MUmuDL9fN* zz)m~|X_VB{m~vO=jv^7UfXSc{q<6YG^ud_==Gq9Aad}#DQBaP_8MZY%(2=r;I;IDd zN+sGSu1{K81h`98zK=|MfUn@ufAn?ITf3}yNm9;A9NL3*_^q*+>Ajh^vhq`s 
zEe#5V$}GDKKLPS>4tEIAcytQ11@=DcpPNyY)_DmoL&0=NW3jrBn!*_H{7aZ?IK&ND zkk0)WZ`S7nZ3!ioLWl-SbL!6O%M8w1ck8aWWPET7bLf7#@y;(?6HTnZ_*Wjl%6{}# z&xe`}>vSJWY&8&Jjc8<2yhFN(Xi$p(Ok>YP>6Je>8viL>?9RuGZl1Z8L%R8>@&#$1 zb`;;jiPK@cl+I2e*WlIXw{Bpi;qsp%eZU~D~z2l*fsn2IpCHr zX+uxcFA5sME_)?62*?}W2vMe8%L~#fj?hQ-#@_<$F`Sj5wZ?%F-^O)R!wdfWrg>Pk=K^bHv>vRwQ3}IS;iHQ|)iV85(aPd2_9F0sq4*)HG+j5O} zf#*TdYeo5GF#jiZ>2Vf@8t2`CuAj{ zq|^f84d*p@zZXoY^4=h(BvBS3W#J2J2`V|1*Q=vJwO4>n(~R@lqqWQi25u~m=Ku0O z3v@zNb1C&lmpYfCt{jg`MCKGO(NC9ZDU*0?df@dS;EW!EWCmRe*?p1DKvfFlkLFpN z0FywBTX19Zi+@M!-1agQlx~DX$!BX|G55{(H~I-^m1o$R1minkXVR~X3I!RK>EZyG zlp~UjFah;otB$1uu+#S4BvMz(_Mq341!Af8y7HHe{D!;?Xc4IZpxyd;GU)Q$60sNk zG*{yN#{{LQ0tjr2dPU`RXCzX2b1Eo*dS5lWJAKXMHCU@IT@TINglV`IWhoX>nfZxR zUycmYJ-deuF$h{T=y$=Gy?cYSl-;X1(^c{D=VIH57EI^RVNrWjv2lg1Ps^EIQCs2? z)YP!~5D~fzx+O5mm)6m*V%q!UxTM+yk5+K_;~k|RFHG;!f#@F&5M2hJd!hl#6UiyQ zy;O08yM6!f1b$7bN+_;Vzw0D4{OKcf@TQoF^d1OQ3 zlft++Aor%3OxISt@15efHCcBCby@1Eo4o%qs}&+(d+JL>vjAuUN|5&=?qfZT zV|>XudT!W_8Uv@BaS*G_!Z=gCz*qCCQmlJSpyKNcHLO}XevY3J za;oz;na~xH2RUgM*L&}$xWdk+_sEVqvWy{tOq@9FD@!{(lmjfzi*TA~568ngI={Mc zT`5&?h#F2iKj>KBBxnPnGdjz1a?XQF%xg;X^km41?W=II7j{EH%UAH|pYsBgc}-%j ztf^9LC@5*i%F=wDny|?7KXJ|?F@mVi1x=bWu%Z9nTG;mK=>h?}P|%&q42-DkIPn$7 zsvoEOdX_O-&~wbuZT4Dl@X>ACF0d*r9>h5tLO^ZuUxHP)Ae}ZNyhqZ#o6|4_U^%23 zq<~(!Jt|*z3bzrZP1}L%lT5Z*_I=D`u@aY3Y?`$&lM@iDR+l}QspHg}5#Puw8_79a zwIkDT{Mp&lcie=zw-U}O#Q{K`fFpW=q%}&uUd&kzAVGVD!QhE@E3p>5g27IYIB*`- z?B7EX#}Vuab$M#EJt$11lFPmldYVjpaY9FJw<|Pq?po$2*00+ zVJX_Z)(+X?UP(Kvcm$7ey>Z`cAu$|;DoU79#?>^L543#7-xS_TLm4&Ft9pEtQ6j8S z&2bEfKc+S>b|N(&KT%9T8er-s@*Ov*9G06{IBvQiFRei62mwNYR1Wf74BgqfQo~eS zo6aT(O&B`M>R0E9LDzOF0;y)=)PGi&G5Grk%FF82&Bu?nzm}V8W3jP|vd~2AVhf!r z#Hvr1eXXNfA7Zdl<=`&lp4yv;ge{#Vi&1WT(hM;M@H&3WP88bP61=|_D(JPX#Cq91 zsR9a)Sxi~}OCnl`-dgGOy4<$Z*wsZL@<(VUbi(g@I&u3GC{nXOf-pZso|VOojOKje zve;(G^xFJM?GTY!IwgWFQwDfD7oI)=@DBMv(lnpe-9ECa`KS%3zHt3wI!CcU|mw@t4 zBORw2rmSk=w6`RoCrbo$X^Y_vyk}LBOp%OpHXUMfx0yFWwTBk*&2HkLS#Zu4#AJk$x+)7C 
zg&bBJrLQ%f<$Exv`l=;&&aM!22OV`PW0g#>#NM2AP%icJCI~YNYhy^#z;ErbaLw1YocMKigYQVFf%f&7;8Y5)SOzfDN0pqn8n zP41wxZRi^tZej3=KOBvz(eYIrgL|8b6l+>N%tYJaOTxueB;Kl<9q zpD7SoBM4Yq_HqYh^^+kkGSV>Pw%+bWT*K^xJ zqqpW@h2CPJbM*xkByZEWq$Q6m_}{RiG#c-VX=k>*-@L9-P4molYAG!W-)V?2IAWQG zG_`SW_VOK#rO@c@I$8s-`*ZE}*@qUGwTz8bQ@p)O7Td8Fr`|z{t3MjM^RLIi3Ipu& z$AULP#N*B&0M+B30JKYR`X@$adVzM_#w!4;p}B(zd4~zhvzkpNge4qj5v!`GVx1+u z;&V$c@sA8EM&Lqta->(q=0=O)At&?Xuwdf0TKM1@&*~2~AZ?Bz^5`g^-v)u1H`QroE3{ z&qu^vPWkw0xk+Wc$ca$JD`UQNZm&$yU;=4kpptqxYDFH2#dQp50l|h&sOx?HbZ9Pn=Ad>%5%xby0i?Ze2uDU(Nu(HUCeiI-)00kauGT# zdz<_Aw%ueF9Ma~SRYUvJl)C=fo0aT?PCg_Mi|r3!Uo_S;9X?Z7Yo{)WoynM zIa~L2EcQ2H$?%P4t(t=J9`l`D8q({Z?YC-EKr|3jjYKe&4R>gH-%foeB8;Olm zBlhqQcTc{&Y_?!>eVYq=HM(fTvnF|T>Fu0JGp=X(0y;~1J^Fz0MlX72dx?@Hj#k59 zqBJBCCGJ&^%QMu8r|HvX5s=ikIUrzyDaQL@FYq(&erY8AJ3Y*@#rE$Xj~S1(xq6ID zi>`n-B&$*5HK}=`%C4;i`H$Kv2g`5DV%beCeuS8Z#^dOao2boQ(Dw3r6DhC_gZS&a zkk9u&heW6R(dsX`&fRtOSrf=`N}Jf8p;X_|K;hvP-t@(wvkVr2&ZsVJzEEz2p8K&?GgpNsh;mN|$UU;zlGRnV zhj~4NAv4HRymZrkLl27{HbU=*ZZ}!HqKb-9LwgLASHe<$F&N;uDjUzyX5px)!{b>i z{*wZ%GxoOI3&OGX_opl&iOmn=yaW-gsmSP){}&+;_@V?@dmC+S?OTDLjLFqbrXncR zEo9W#b}cH`vyjtMF(l|Qed(4u#Z!Fn5P40m844&{ifhK$aJefJ_k)_8$`I370s@J| z?86&Lv|0vsls0|*5`98X!5=YWqqnEQsGWD1-?e+Yzpr4ERm7DCs(iWT_44@r^F2e^ zbbMqVS=gQITHA(fHjJY|#;F%PjSTp>y5MqoA@Ly`bhDKNZQ@9Y?6zgj$~Z-*#<81Z zzHBq9*KV%(O`<fvN;lBHB<$+IqDfK7VSCixl+T17|{T~Dw%u2)=16S{tG zr;6n>tO}G**INaptt~-YSv9T$P~=mCf^xIOkvp=$?PlrbY3M z6$I4DDtxLHDhAzZ5fLAHn4l_f%|fS)(=Mq9Whzz4`~q-fW!}>iW>yfj#UzG{L0opaTO(4LOARhy4Y zI2%KQ(u>!f+rJxt+dflRAqAHIbmQQX@+d3-)Bp2|EuIQFA+af6(zosVDX)z5tvTqj zKU$3Ox8|V%2-R4s-?+JianL#T%O5?J6Qca4!7Ff_WXI0a?t{P;DpaGiWxTSsP%ZbJ zgQTw_T!Lbka*w0Q3=)K=v2?859E~S~`NCS{kT11E$)q-cpWv+lkoBr$f4$!%V^%{3!Zo-NqoV)PMY0E4Gw8mNhV&+?i3zykUCKx?d&L0Tc&aIBNv^v~;j z)+`t?qrwU?d`(ExBAKO6WSskUhgLse$&+~P13P`iD0(L)l1>2?7p)@!sJi2WoTWlv z^FcYbs=J%FN1FsIF}YQ;Aw}-#@b3K7Lu%@%TcDFn0fEHf&)QNDaU&^1{5o}5C@Bnb z;Yo0%ZCGX4g7jFn+Y6b)w$&n;AiF$9ei)7Xm1&!WH}62 z_yjk8op#Mt=Kkz~UT^?>Sj>Hy_9GCck31LLt2(96_Prjz?UP9QY3gdCsZ!^W 
zx@;kK0P*n}Sd8(*x$M(hZN+WLSL~0g^F{RaRpeg=3jX6k-%{*KOpUsbc1In{--W!@!IZ>=#X4 z;1Xs{$lE`E?;4B6_^^I8t-H4l+ee?dJQ|+N2>=-Lin8eYN=zHQ$BTs?0E6lYdKr2! zs(Z+Da8}!(Lq+qg1yZ=d?OYovmJxV$qu7=GhKqgIgKY;%7hf-fH5zz9Eln});9`Z0&VjHZxoDMMsf+I^W zYSKgsyZwCY0G|s5i5Crvzxpt+KLv6uYrEPfm(7>`}ezDS8Wg%ah z@5ilzO3;#zo>$CJRfOS*4vc!dFo;dz7is0S5YEZTLa62u=&><|jeZ0#%LFvuAxiY( zj8RyU3Dr6VA_zE<4X?u-WEj|o^+?H7uXd+m^6!((7gyT?SAEWz6 z$)ve=PARngp$ek9kH`%priXcLjpVO|R9-yp((Q7nhtcFqg}YJ*wm|3Pp0=czb9Q&RCJff^K4hq& zi(9Oa0D{p7xl+XTq?P+C8Cw0U-yO^bQRbxNUtD$p2Iz4d~JC963Fy z1^fQDR^Z=zk(A*maImV4`hfHkLiWb%rkZWt@32o*+Gzgx+~M(zBRC~KQA5us9LGQy z3Ckc$^1!#9{nr?+H4~{|YM2XyMS6Y8j+|Yi*KLFC&S8q0MXufYXMcjmKw}>k*;{&{ zAFgTTdV`{-Y+r#c0iW>LWutpTuV0H`F)z$Z)KFoT(V*DaUdo}=3j8im>c{l6#kx0_$ucDaOW=u7f`971Mre^`tG+T4`vNsLsd4-E-(#>mxjk#@ zPT1Ea2!q_NSd_^X3=gjgYh^wY3gF_CtMz{5mOaGDq>A1=K+kwTL+~k_W>9SzPKv$a zQMeDmq7?$l;bWz=D{>W-ljsDHUZ|3Zc!V5f)7-zR?fGU6?~A8K)KZyoh7}VPnfg3{?(z-pJ}L z!r{rWSxVB)(K~w7L6n#3&yvG!iBu!`)ZwHQCd)cAMJZYlNEMx1ESh6F-j3A+7z zPtU$eGtd83eLjG>*dAaQT!PMS1AQhM{PqqR2eAG_uGW2}^jM&FeF(Sf0L#BkL?xY- z#SX&athnXm)Q@Qt*Z}vq-%rV|z!N3h`zaBiEQ8gu0zW^toWnQ+S&*&Bqc>|sRuBVq zuE?->H#f7T4WZ4?-`w*|{CZ_wDn@=%^Y0T4WOtd?SnADoio6PVKgxJWSAa{D3w<1E zY;1@S!%F$zvOVm}_o%|pe8Xj4&YKji_MMVm?L=?spm z)TGr+?glPE?zOk!l~*@aqhBXl?W4ah$rhrpH_oD>xt*y>v;s&IhQ=@V8Ub0pAu?Z> zuCS)4mYh;Q)FOu*NMjB(h$p77>}b!^t>OCD5TUX=rah-#7tc6-;=+M3vvx>dw8c|- z9qTsbxIFrF5|m+H&BYir(19qLWMj)>yXi`tB=M6}dqb48FmxDpPHHe}n?3u0M1|;K z&=%RD@Y~m69*E8Ct~Fk9Ng@csohw7IlRUcQV;2G#RFfz1XC2Mp08Hr~t7$2yW=3Ht z04;i_BEuq9sy?Tzqm$ELdWcqb`a-WDjT_EL9(V2aLlAfIi?JOxzT`K$rFfqc|CIUL zdZzBP;Fz2@-X+WqNEf^7EIIJv<~e>ZGHq_uDF9Npc3f}{20Dau(jcBCibdEmP{fM| z0eurDO^HP01}~b{YV)y=vA3I4D}9&iWmV4?;ok4?Dx{k6&Ia5B2J;j_0jZI`3=Y-> zrdmR(at4~>o<|E_-*xg;mT*4h6;C;SJ;9j`;#B`%zWa#Bx;+Q00ee#v@_+gDnXP{) z!C_?4hFBD%43HXK@Nl5l6h74#vWg*5p@8)w z9&i8*gvWEuZM9&c%;9S*_rpNX_dCO#5jdjbGzAnmAZbwVUIAC)W9$>k%v)jC*_wIV zs;%Qjxl2(jzj8w?l4m>`qtw7y#7sl3Vd>oJu2n1ft_#>3yqGsSI)B|Rql^1e_f*Y% 
z2=>%$L_>$N=Xkwfk*zJ*0@sY#P*sY`F+L7=fpNbuuXmjdQT7`c5vNtnL89?MI9L@a z>CEL3{cxJavYqRv(mtlC>P<4zvea66ej##COPlZBh1^K_K{W%GG9QjYql=8Cm=|sl zy94|R!e%!0tYV>iOZM3rzo~T6Q>@`@)SUr$v zm7hW9@fZ6jCHT*lny9Wa#LIFs`C+(VGtpeNO{Z3Lh>E%w@=5|E)O^&T=GvGUE6nG; zzj*UMF>?%o1!!gnNg`HgZ_Uo+q#S!F#YG2*UL%vTL*cwuSbC<>%)l(k`qWFPq|te; z={`BWd$2l8_7EG-i&vB*Ya`>URxg8@r8sp!*oJ+e zylFJ2D$rbHl?cq5xPkropX4qdZ z+`BVa=N&GEchVq@OI3we7b1iA>Vov}ut{WBn34D70gTK#*YU!BH9XBwS*80a@y$h- zpd=U8J9Rs3ou)WMm5rc^+5t{$m*}%btXV#jD_7Xr=UbUatd3CiCPAy$BPe&E;$+kE zxU>?Mq_|6$xIqwHwI43SEh{N9!sxq&t@(dMyM@yr* z(2#5%#!45IB&=Ru@{LN501haE) zSOoKCu8$(u%z(Ayfgk$_G}-*(moVnMSNdw*?|<;$DaWtevNSDInCfMW$VsHz6WG8l zbX!sUtlDOWQoLm#d+-2#A#V%b?G$!Cc2Q-$khts6J1rX-XR(cV-1+q_;&<*NWktF{ zZ=hy4qLWdM9h8cj&z}BDeJGv@Pdmd>mz>THY+JpMLLLKGnd|H-?3y7xii3`!^!%%xiAA{xcD%QMbcQm z6r+~8Lpxnl#@2SKJiQ34%OX1DUffuWQP^Jr8Y7*l+AK>rSBwZu;0{CdyPn*-94id%qs&sWz77TTcRqRu(HX9 z6{^?K{n6DS66z3yaNX>q(Ai+}dpURitHVtLhg$5;raW3a7=wUjtN{DX*Hoi@ysSl- z6gWy|B*HB#%juHMfW?p&Pb!dTTYf_004Cr|m05KBtd7d;R;?ayZx@j3(>(qYATh|9 ztSt;gQsD=;0{mq&`unJvSxgUHO27=DHu#A_2d9~ZtV&TL2V!>VHZj@4ARx=8kR{G) zm_m0St-`7Z#f<^Xe_IntBs&DD*}tH(9z$e(hXFE-O)5_-YSS&l<-E2B35DCgxmtS$ zad=reV(7wdsYc%*io%pFc(Bibm5n#`&4vfUG4Nv+gwTm{;Q7cwebSl zm^Z^6036G3ma<#0TS6%+^K$>4m=7fW}UVdpd#aebj1Mc0bGQBx$o{amxgTvv0F#-h3V(}YXv}zR-we(erVwtWrV3vf|Woo za?Gz9QBx`J*yc1eGb{U3*qxC?mhwHLai{Kk@!~|K3xi?E=vXT4x>DA-sy6p;Lvr48IMWxbH@je=VLsV05(zqFN>@sajQI^_lqNe4z69Y)+H1FUa z;O~CyF&Lb_F*V+_q1(JlpKdyt)lccPR_NMV-v!jdsY}8*fpQlxt;RHp4B7r>E78=L zc>qOJByZ^Cj4K0M7_K?_xzM4X8kB$;#Cn`KJI+B@ffZ2FIa%ogT|+rG2bA+K4j8W6 zfiUXLp=Bj){wWJ6Wrn z`sXK;F~}iUoU76at@cO^dL{7pG*eo+p)$H@x_tW`m094rA8V?-d#LV*imK;?FwLY) z!|0I7&J+o@{pyajD|m%w80pJ8m93cEb+}AE^%N$2Fda*2KZYE8?|gvJKtNK9*Hxu& zzQ2txjoHN%`vYiLC3>5}*@&Yv^{_a32pp0f*+!dR8fB$^u#(Cd9xgi>V>#s)}&i?+z>pC~Ql;b2? 
z6kx`C=}TB@A_-n}+BV4;k)5EOgBWV5E)aVAmc9DWmGlu`qL;9vGV$BqQ^7R$Y$JG_ zFn`}qcA~~Ae$;cL9Vu>&VE=lI78n@=`G^EjLpkyNmzwW4h?u*-51yL-l)X%y4qtAcQOtaNFiR2U>^8*Vsngr!3dDofCVU4?!4Akk8shkSs67JImLjrU4sR(Z z0+=V7U!$5wVdP8@lQg(@ZnAj9=4`Fg2IDSrFb3ZWk9HbZ!1)nBJ(+RwUQf-;wfY?g zvk2MjnA*Sq;_o4!HcD8xKZv@|#ey-6l_0IkuM@FP44uCrmvo3l@&qQsS2YU&2ny=4Z;FV`-gha~a6WELHqx%YA71M2u0NJZBHfzg86>=E$hvYgLRZn6DpB zkpu;k8xv%D4NdWJb747JdKgmy6h@YCTyz}PxxbprWn=3E9D$%1FyZ+r#o&365`sJ2e>T11xWLwu1=ofB%3C5%L|yl zWaxr%iAcKi8gao*rtvy{RFw0Qmf%v&-8bM0X1tJkmjtiqX?@ELXS)_kB*9=y*BndR z>EhOSBkgMkkzT($80noR7}PiYwk(CGT-iw+EN`u*4P9OuSQR+BHS*30ZbAr}2|Z%C zZi!!45Y;Mq5v#NVB+UY|fC&^^l&v?KIZp+OjJ`$6@%8Z0GVfec4f`?idw#7$?!8oa zJjVYTag?BUZK^n@Cx$SX)&7?^KE+?nMSXB2!78ub7{-j59SxwD8H8Nk?pNzPLgG!K zTi+`py|z1>vp}29<&2h0o-3Rbh!Kxc1RBtnsh1L#I7%C0N{1@NmP zqT4p-mrcSK#ZJW;&<<-v?o6rOETY*6ksYp$9@&bYp$}&jj>%A?+h*YJ1fVdB_ z<9xwYi3u01K~d?6|M3bqRr;S+Vr+AO_5uLYo(&W#s32p zxirf|RL8bCW``89@u`=HWKHW15{pbn!cCM)%U^M{(Klib>+T~&Tgx{T5@2>1% zQ$dmuoi9Jez&vK=79J`xfr!*TRvq35`c}nVK(gjR}Yp!UMA?Cnupp2eN2+g}2xnT)E( z03czg0l&4F?B^8=k<_GBSs~Yx4teAMdTD!Rnt&`fKpLNSi7;P9mO)RLZ@0+Yu|MG* z3hr$Ej=ioTQ)O={1nr}i$+AY3Bl*+hTSU^>&-JrvZr3 z_O7@n?pt+Hg*X0;E@jE8zYWLr!Jg%>pH1)6S6EajY9~Wc-Gx|U2}JQDzu+pcLS92! 
zO+Vf@sr3~1h@XZ2lq4jhs}JXbhCkr4Vz{OP)co_vBl!RDOu?cj`rY9cVUAbr-th>O z->vwXe-f$7DJRKwg_961zwuVBLtCRNgOcYLra~ysrfn=o621G5wr9FOJNuUXvb`UF zDD#;%#@yu~@dHvpw^kGUviHN}ECLg6wpX~*w5b5q_r7ewok!HxnT=sRI_AF~jzdJ?yVeS}ap zRh@{=IHmJs*LZF+M@{V8&BhrKwi`;kou;4pW=0rFjfc<$; zJi()@DZ)g>Z2y+1QWJcriC5v%wMI{1oVTnLQ$Bx2A=M=N@;u)r9SbETer+(o zQN7OPF{MV2Ei773X3tKOvLu-|C02nxWK(`a7}PhOD1jBWbDjwZuS0jdnDd}cTAmk* zbYz<5W9V_r51syj!t1MWuOjOR7u3$jYo@+XCZw9(__ zOq@YJjghOZ*34W#6`O8wu;%gTw-xc`V-5qk=#ct&A5!&rJL{)dR3tE{w#ao?Kn+OqliwIYp~dM+J9Ded1S<`hA|%hf=m z7f5y{%)4)tsh|W*TuiOGysvs&su$O4FfL(t{#WE)9CR(3uT1|i-9@aa3Bqwe)UKXc zY?uhP1B>DigGcUcqC$QIqW4qr096agZNN2(1)tgw=IB$^$T~HKi==1+f((HB9b$X^ zA#u@tr}Yt>AAhgVg$%J>F=N)P9D=P!$f3QpmZp$4a-L#kbDY$v)t&Huj}0fON3dGxLy7vv zkJYS{CYqqpGuc(zL(KjP`R~(3fCrENsef#vw$jy7GuWD=Zk*AzTdk5e9~%4$$2e5` z`kL}&BBg?`>&T!E>ArWY%7a63T|)QPE`#|Kfl3spsAxnL#zz0mU!Ujrj_lN}vU zo&m2i&DZPdh5Sfa6lv$jd>q(%M!O1=Y&p@s8KYC0uo<(2fQ`!{#_a7Q2_81))y42+ zP&jV6Xy*&Z=pG`06XPm4mqc!W$k}Qe@m*Yo_5VZ z9eX8|dK_GI_+(ptq9stYb*VR~b8u21EReQwrzkIH$hnC3<1es^$Sls{0_ar@vqX~f ze&~t8T%c656m%ld>ZaCIUw;e~1NofvkpwmOqMm;}(|LiHt6gTz|1_ObLyWEt?QU0C zBnuGH`sfbO&5o8?G<>Qu2)qblB=DGxHJP~Wz3OOz2*t;%=Z%5hCX5)du7IqX>c9i@ z@wdncuL4X!Qk4a@5YEEX8%W&}* z^rp#nt>g2h)A)R)TJ&w2>Y-tD)lj=L=9*vgw}MQAA)rTbe=?`f>AAyPSfiqaW%I99 zyQ*6;R|Z4nEAd?Z#j}6v2hba;?gSz1fL1<{1>^Y7gMNAsoJ6|t<<$L- z9%L;7=*2l-yRKEa|LVm9ZSl{!*vIi~65h8O!-HFyi91Q*CR6vZ+|lH2bi#M(%yz^3 zHGWT_0housK<(M2yW_Je?J^9jjTNd>+;%B~LP0>9gMTF^;?T$sGt7geX{%;#MufyG z6;pH1aD^-i)TyHWT?OGWmC?C(#HyCKEq6xdg^pX!VB|JDs;;hm@JjN8*f&OA!dBw1 zW+$I^w%~A@@{BU=8enix`R>PAuB^^R zkUkWoBS$3O(h$Ip3%*Af^fU%KRvOecx6a7Oq|mb&aTV?>!@PUZre!T|vL&!c$EE8y zj5MG0K7Z5jC)uKpn(-E+y9UECQva(6f6%AC3@59f=pyd5I8IRS=GIJ)2I}k2IVC0P zwjfYIg-vBoG5R{|A}__H*}K99JXk0i-nQ_JJ|)r`wWk_vNN60R$X_R~V@CJ6axXs< zCg9d6R*L7v*vD#?LroMfP=G}(aShqjezu_%?s!KeaVoSqYctFiDH_~Il%I2Ts`2j( zzx*ej6wLYPX1L&cSVr>N;RJV3LT9ORrN&Pjza18=oAK+q>15331JYZHp$MmF3)FiN z?!wj9yHFAK!~*{*bt9_`2Wb23)`-3IBUT1`9O%-HjU)0>*#X4l>$uY8M&nJcG0YJ1k#p|4IApX7C 
z6zVB)AGy@bGre;jBkMl;E)D<0mM7wW$utI_(EEnputW0WpC}4Xc%_qMRE2_r zoCnv66VC1nefrd!$S`72o!1@eZF#7|NF{HtkVoq){@jZxb)%I-?0qM@CMJe;*PEf0 zeoJt~e$RgvOOum*Bh%CVHbm%tHv+0H>2;3t@#S^8>eWz*wYiGs#{wytWw*0yB)_mk z0H8bLwd~e@O9QHgVeOC^2UN(?PScv<)~U==W+)z4A?QeRF5~hGIEkN* zdSktk(8M%NGB1g44r#G~l+pbumFvXyR!4m1wUpnIQAY+Kuj6K;x$c^>=>J}{=VA;( z8!|mIEAkxxhs^?UMmaN(Yv5=gGbV}$^t>L}23jf=$#sPUr^4&!?C>{@ezG;Iy3#Ux zcphC$?TE}Y4;k@OT?{PjcR7&;mX1Swgxf9I_$`6?yf)bxrFKju$_lJFf6^LsXMF_y zU7KkirM8uqcR;2ec#s}AF=MBA^`1eg1}(zO=~hk1%imxHR%MWP?iTU-5~8G5J2enu z1R@G0M;Y&q{st%i(SKss_Jv^wu4$0fR%^y&H|330uXAc=jj@?-_jB>Li4=>>m?_Xn zKaB%Cc!)gIELynk4=Gcw!AMBgik&zIK&*+y-H*focoUH^RF1p4-35gU>Te?~BsIFz zjPNK`G@h4)9dTCL1{*PUrrhbh$R)*3}b?Ny^JXLcmOv?@gS&3@~ZVEY|*#y7?m1L^PAEBmcnn< z>8Nz=Nk61tW}D_B_k3TrhD{|kp-yDpejLmf5VvQghrHBwTf81`r}nHj8wPIlT-3$- z=#{pzTBf^|uebQpQXOpFRg(IT??{mbCjAXd z_zLIz>Aj?M=za3b=r)z1bd=tZ!?F*zAe*4W=nuT03EWqUK0E&J>LlE1JhArL%AY`}e37nBtAol&6~~N^Mp>ZDd3cVB=X* zv3DESo&@bEw44tEdwpizF)QyEf?6+hXu&6jpY3Ac)%h#^RzluB-ake%^LTaTo zlFSG2l{0-QnZY5AyK1AYCHNB}6uQ3-0PElwrxh2Z+y2yX^7F?7Lf04~`_HuNThRhx z;c&V3tO@_;29uNQ>dH*FL1N{>CO+&&{S+xo&UfRc(I8Y8itGx#GXnurw%xEd7+G`= zkmBwcARCoy)q_MP?_>_<%OKlZFyKj;2J{29Pns;Vu?hzdxq(-dow$P}vR;mpv>Qp) zCcuI)!k;g3ja$@CbO1X*#J@@C#e#iWEY3LI1JDG-khL1U(?p#-jFnt-#vna7%^keV zt?7tEQeP~CE;R(i*(H+W^)mxxl48=>O)~?E3%%7sz6$0aS2(mtHh3_vo+H-A%)Yr> zkwEl*m-X1PkF3EzJllFS%y)dJSK+>*s~7Ez@o#8cst1iNF7SY%lV07=idH`^XqhfW zY7oURoC|z+T!r0X@e?2I$y5=XDuv@H2;D$3%Sdbef~1`_vQD4^CPdp<2sIf^*OsTG{F zv?{PV*-e#_G;cXUm5D&oze&O#_9U~DDY4a;3mzwcJ=SU6M5~l?#U5^U4hhFhm)EEv zf4ZnmHx=;*V-|Z>nQ)7E!0o~Lyw(>SZ4P+>UK6_oZ@t?!E_h|9++gCa#l$y>m#Sit zQW1KQC_szs(Zo}w{>fn_k@{Kwlq&^B$yP&V?}_-L8<2Fv50ayr;?I@TLSjx+`G3cA z{i#CE1*NMEe`JdwH6oehWK)MS4ekAto%)mRUt;?mdY;MBXB-lD9prVW3l{of7VRmh zz>d!+H{}3jcd@(xql3aUo_@1$vmjDNR!sjjH`-=V%%uV)u6&CJ*6E`K!_9i}hon`t z0HDft{xSrFswp6G>&WwE0z=#iJp{8n!aM zr8!plUS4Z11GY`tO{*)XTaKiyk}dLr_p`X?4||-J>o5>_Qs#M%{__0evTxEI-l>H% z`b?+36&mEVCgu($oY)u{UR!N>5)SlCDe2~Y{a_@aIbc5Soh^d3)rg=mqOSoaX*b< 
zll7B*D=F)Q>ip2nLW_?@MxyXI&*(t~j?j@&t@J@mx)Hqy)%(MCun2TCt>vY6w@j}Z zUhhJX5g2-+pAmla3DkJSiq8en^gU&Sv}*$Lyk^A~4^p<2BrFr0bic@)O9pv~4dmBo zNtXPfDbo!3Hw9t7;__zc!N0jL2C1^VLCA%1Wmklz~JZVfc{u^=Epm4<13fW~a7 z7W|Z-9l6Kcn!pb>%hSxP&W~b}JF@ffLsD(L0|7KvfgV( z=s7af(zU8CULvaEj-J6ZEDY$nD^~c4ck;L+vChc&BN6G21$!446eWJt$-w6Qn2Z0{ zCXW9-K~$e2KTs4~0lm;^&7i=n8?S2<|Jg3rjnv<(80Zhx01on5lq(VoID+w7_8o(+ z=6@qrL7B#Rpp-_rfn0kPF?GBC*W!!v3P7_Z_8N~y1dR}TeoW%Y7m)>QVo8dyS2P5y zUWzj#rml`DE#Q*-oj*K>d~R}5N6mg7|DvXLn3sBDSAki+DnCf!$IM6D38|&Hw4yhk zI0FW8Mc*Dy8<+Cx@-&Vu6(%yNlISM{0k0o}wzQ)x%^8j^vfima#x8ho=I+Z1+@CdJ zg=V5&dL*CGl&1oGL91DOTVQ?>2*VwY^~tO!<0C^=sMM5B=sj6`QoQh#|G#kXXxl3! zFaBfjnT9xty{0NB8B;Ss6%bB6a81<|cy!r+{%QS1``gX@n&UO`n#1p>SZrDMr65M` zM}#1R+>s=ZQ?(3fyTPClfiJ=RLMEZ%$HwY_L6^=R12VTZt2;E2K2&tBadu z8R|uKRe%jT``e0u+AQyL+3lhdt)NAOFg1REiG9ar>fjMhuih;+UBQ{>>=0d?Rpx;# zTRe$+5zGm!m-1Ofvvh`ukFy>~3rYe8kIX84OdxHH33Fv5`$?v8pblhcBWGFO;3-Y> z?o}nGGlRSAn`JV3ssUt~^2#>IEcDc{(>_=0+~;6WnE;>>a=annMc^RCZ$ht=Me-`O zWe^Zce538n@UU|3luDPqFhWyV=0o1p@9@rL2D0jpKTA|Vib!){wiPVWmZa5_QbN%t zB4Gn=;v;wAzVg9SZja6-TNVoAe29R=(FZ~N(=v`?c|aepOQaBMn!h6Og+h5~RK~)n zug9<5994dGKUUW^D z!Xsm7HpjA=+N>ZQS?7K}!NB}5$}~1w_@vWN>ovtRZkg0!9L3H}MU()C6jvYpfxr1= zvmRGWeZbeFa=m{Z>Z$d?VSoIBV^{oMu2Fdi$D?I*q}JDNxoErAkr6m%*x#rD|I74_ zvF9YCpa@CbOxC0vAvGy)dlRqu2sgHGOP)tUwhpLKKrbXO5c2w1=fQ63)G<9kG6szg z{(@m%iw*8+5KgunTztZet+-M50a1Vv%mk2Vv}og8LFxvC?xL`!yvK!h#h**D=5Y;N zx{zYqbY3$w(7Uym>vAdzg#%$4ht@UsE;*8JnC8-s#Qxo)W8KeJ#4v9z8^r62wsDtj z^D6JGW7Dh!ydzQV4}y64q^G&>(z;%uX#7MkJgPF4(v&li%nN)5k?Ai0NzGbRUM+=J zauykCP)*hQW*WOwkQ?O)JE2|Lj+0Zpf5j6xtn7e`6kc z_y^6?f^w_#!p|5}a``@7AN|*jMqE;l_H94|o6&3q@v+lw`?NH5O%!8_uctJAuO?ue zUltjyiRiD)i*@qXzzI*h@GBhpW4Zx83jM&@wmBTL2xM&I){G^yvA21|`&A=L8amdn z3v`J*LhUJE{cerMEx)?fX1F+wc_j!>|LmA$x)+{8)O!ww3!pD7`+ z*=lD*a$7q6;F!v@hw*sw=jlT#bz^|TAOpA^w$^=TVBMZ@0YNG|CAzS6DVqk3R1rF$ z!KwCMRpk-k?^RZQm}ejU#b8ul88!bSU7dwVFs+u<_)lQ|DXfRYvKm}KKEu&+>=TOO zeB#zFFRZl!xK8$RkIx0;%kpd&xR)K!f1kh4@OZ8%i3w}xj#zs^`ZJ^f3l+qfZ$o4!#_-rC#_8gh;r}48WAHOMt1L{m>pCo(zML 
z&D3PSVGA}j$3Kb>4LyK4Jf_Fo)xlrx%A0v0#hp+9TPx(tvMD1!VL8Bzk20KvMX@K` zw8N-_L}Q7bDF|LS%>N-Vn3-PD_o_aq`+jUJ0O9Ci4?5~cKxbo2A^v{m&IJ;`iRC4? zUw>o3B~s+mO1Q)L^|ch?MgnH6@vnjF%d^%r!z`&j`AOi9NE+uJgITdWZ);EUdZWCE zi4h)g5+(c1$q)n8O|i;vbs=pm1t_ZtW!N4+6Ssscx> z5V<~Gz0+CApnI&d-0x`Br1d@zn)dA=@ELRxck-&4zU!*ESq_+~ZE4#eb6sXvnjl6# zVy?xMX@uvHE$B-q49gdd!-Jpi%qU-%KBJm|To9c!L5N+IWo>i}M|(ZO`<(a^5xj2x zWtl&b#}z1{Q+2DgAWXk-nSb9plHW>MtHx#yL=}wh^rw8XQ%8N?cM?>~Q~kMy1)3b; zoR?|FrI3L`L=mpt+Mi{K%Iqce#(0xlxhLQq6j@WzmD z|1~Ov%@Kkg`~`Q^JaxLvcbP-}9w?urJ}a>O*+sTuLC-?Ru!gf`@^uds!~piCxtYW@ zFdxM=sP7`oS_w`hbz3lI&vE!5IMU77uYeV%4Wx=^Ol}^9j&!CdN7;~)B7p@^yl3eH z>A7QHjYgEv+ADFFtW4kS9vd2^+&o3k+eJk^gWI<};#(fEyTcGXn~mK*kyCq?eZK^g zPqaeC$A%o5y%qKFWT{IV$h1jYh1z9F)w?W?#m1HO!-ytkD;dYBLm#X^@y1p~rA z&{c+9_Pbip=MkBV5+_%T!M96?0=^B7a~>Hm2Xow%8(W>V*t_vzi?!-@tCR2?xoO0s zjD$_$JyI~bs1yCQdVQvJBBI^wM321d(wGv^=Rc^uW z8$70{V{+9(*-BsMLjNirqhSLNKR$u8`r$nXJo)x}R0IiEF2Jc2kIK2SYD&Ffr4vka zok}4$@lT!{k)zETBVjqhKYt7=yqAcpU_Ww4Qo^+ItB~!>^A*1Ow}_;iU1}?78(~_H z;h2#xz-nTrXYw3V3QseDNE`p=`VCs0Go8XS%|d}-Wi0Mb;8kMhc8~~_8A!0gE;h|3 zjEW!N-T-J^PgqdHWCbEprPq{IFlJ5Au>UK-wQfXeND_fUa52fz#Dhi(iJZ{ z48OdnFO3n|8%AaaGM7>~_Nn8DB4*o0gM`Rgfh&!D$!|Nd?{NJ1eSJs;jM55o7Fr+Y ziGO96HxS@fb^J5BBRPb(G9G8CeL-5p`r%gu=Rq|x!`^QNpSm|J`M0D0Odj-Gm2X_5q4Vdm78p*Xv69} z&3h2iz^`D6>uvKLHHzWmp>*oSfZGy7IUYK);@)gTUq$JgH+Nb*C=`Kc6)8g(g`IJIG! 
zy^&L5+7^{OFqrvfu7ouL!$JIJ6tsn|QfRE`$q2T-RNe#a3)VWo-uy#>;?};%hczpb zPui-|tu<4PK6RdWf}Wm9!o8^P%W`D$l*lbm%r8Q)(heFi*P7D~{$CTU&`M?m2vMf^ zWdCMnXE%oTV5eGEJzKg#Z)Z>5Vbkv~U~^l&3g*H?l&GQlfkj(JC?E%}FM*qeWtm2} z6*(cd)+0!4fhU=C0JXby=@I341wJyaOcAToQ1~~S@z`jx1G{|9-~^$b6k)RxtTL0& zwD*C?s=x5V^`bV4Ajla12om*=catRDcX0t0!6pXD+eR#@BgS*YIovfI9H;4^<4ZDr z{Tr}NY1$UvQKgE&&KrP%GbG7(XBUwKUhxdX%4|9IqgL$X^;N>5(XJ?>$?dHz9-=q%BUEAfh(&bc%`e_JZqq z=Uw~LjNmVkPW74b;=?oC3jO6P>~Ur{yLix(We@9c4_hkX!IX|39Ter{;Us#VnE@l7 zPZtXo+$VeVECVBLO`&SgxB?=#^V}I4gDKNoSQ{7BmQS0Ib-gJa4sz9wG_}fpMs1eq zS&uNdarV>@U;!?R7`8-;+9hBq$|*jyRiS+*i-o~{Rng_oYw@GMWG*eBLnMxi4Lk|uvQIi%g6#1SJZ&j1n5v6VNBXL;m$&i=J!(hcGO zpGu&7<3dY(0C!4zwZ0`^Dk%gRs5rkyS#RS+O+C9N>{L=+k~g8$WxC>SI(MS@ z`B!HGDg&M#AmQ&N%pbZ_kBRg?sO5wSv0>nfIwTKqsF65rY_UGf$;r_`N7bfU^0wW< zjq0oI!EAB%v9zM5byZJHj!knNUP6<{tMt}S&A{Tz+3%7V9X8HD6(+~Cl{)#?2H$hR zC6Ubd6b`9uouEI7eJLcO!WqF&L+jmEP}^X@biUG3X%A~%oh461c77z#?~uJ~e2B_D z7id<>?pU+lC{CrD+Jltl10gRv=3u`pK;>`Y1?Ek9fEbj^C5I1!>P*m73!&+Yh5o-1 z`DDeOEk36r0a&qBMS^vS7=n*i>qzK`4$rY?5O0rvb}#`pns~61M$R+X!0=hm1zwb# zv!{X}bK79OA5HbyE$un>N|(iwS(ofx-5(^!-e}t1BWHFJqeIuBR0jcPa|?bABzrj` znjy?&gajW@I~|uQ{ksUBszs9zguY1~mo`R|KMd~8!S1>$#x6FaLO1JZ zGN8qr{9;Lul}&mnaZWzh6a}8y(!Y|*C^quhO)5IgW$o~abj$Nfd8aF+u!K;>&XFZo z`%TUyKJF?(&xQZ$y#zh>oO2^bB|sdEz^w8NP)NL5+jo=XMdytEcUoRz>_EFcWv zTIp?(11{W$sPQZIsdEZ}t~4pT$`SI=yZV!gBM*c0pL1l$58%=)fICMir6PL_aj*h1 z|56FJ%Tv9dsx!OGRg(9W&@{97@)`jE$}S?^0LRPeXWW!*=-pRy+Pg6Y0u>~_yIjfj zAX(_hK>2@;;20>9%}zOMj!07Y9O7`kzK6T}nML%AL@%ljF#_7KT!8cqfkOTXiQZnX zxHNv)T>et9vOf&O_fcNkypRJ4mE`~tHcvB~cYM`5L4)=iNKTpQding0WikGNy#O62XimqQ%uQ)&}xh`$=I`xgjo-X!prAY?{ca04I1@XN1vgx!3&ksb+kRP$;ia0m6wWpz_N*A|DY6> z_4wc2u!rj(SWM@a6X3p!G?JUf4O`U6#ZM59Cc-xrETdc%D}8;brCARuz> z$$t=wBNNxUMZ3vl4g8XN4YIeal^C$cyapc@uqjd9qG#4#(|w&(22Q2gyHn3oU*+CI z&<&W*p4OVS)zie}JCNU{d3|sY7h$`~o&5(LPgz?yb1ab~EVf!SOy)6rv0~SVn1OSRbo=fCh<2v?%~{)kJLCm( zWR37^{JR`|o!1u(P%3jd{s5Aod>mvlaYl?dJjN#C%cFYXL#mOw$&=kl>4YBNqO0Ob zh*f$L;@PEw%p6KGdX_dM1+$kel#;JUjAb1hNt0hfdG3(LS;67ZfMj(UQo%XIQcGOz 
z+^hxWG(?3?j&*aTNt!Ajbrx10G}k^`9(E6j(p^7?HK%L0_ld-_-pI1!>f^ueI%~(w6+0gA+3h$7YR^2iu%T%DUjym!A`Lp1Y1mmV`2N!qT2%3+LwAuhM3%ri6ZkSxrO?JW)V!(^lqHHP$Z1kHz`Rz#P53V9_)+jbGsu?QYXXqP^gBA(L z$BiA*`axus7cvg^5o(tZ6}#D1eCqFVZbCjV7371D=p2L9rj(SR?pw$mSQfWjFj5O) zCfTQjR_6Vnngq`_`~K>%&&79*rwdz6!MbOcm|#nlW_@86;!{G0m^kXd58lS5;?Tg} z3zc}bh`JILLeB{MFS&87(nUeeuUUT6EIE_N|18E16R>T5{y>&g3C-|cks5r3)&_;5 zy&@g;boq;$JWxvPKGWuwP3F`1&X+_1Ph#_bxI-&c546KL<>IiD&|~U4#s$NdEn(wp z-<|k5^r(--x7f5~@$uQ0_z?*c#BDLg#}1;xjO;|y(19+kUC7lyPn9on#?MEbo$~?} z61uX7`|U|zqUi3vSwqm;+F<^^-N4YM#2)t8g7aEHb@tV5-GL#W-HTnp#j1M`lqMuC zLbT^2^2ZfW4eLK|3{)yxZ=QtdR)if zvT_z$1kjx6)}Vl3(>O?D39|9;ecfc5(t@Y6(f9Yqp^y!xxBv-xh4OIb^VE4+$LEhz z7pW~^Iv@IaimkgOq(qCTa3{?d$gk<@_WnRA8l=|wlU>)e@()mM4>n@g@(V7n3t4~X{0TML z@nhr5IwDz@piX}($BXAGn-W)GVucB5Z0jnQNwOvZ2B<6(#1pdc8(|os=u606F$CuU zkiWb5gg-|Zb&j>WUrb6A?=~@UiGOv7%b|M+Lh~h6R-Mj?sZAnYz7aRVMTqe9 zi-~uq5d;`bTsrnB1fd(!vPkb^1RQ6;?p+%^%1%BkQP$>Luowgm&0B9$)0CkYZ$a4N z;D+CSdTa9^=NH318~I1mmYl3)A7Qv-7ZTYnE1YdBAOB?6W8k4&0*U>Sgkuv5{@$Sr zFxTr-^E6m0yOe`!<>A!Was+u}<}AGb zU|mh@`zI$$r#Yl6>X-;;RI%g^bii{OP6ipEXe6)}klDWj8pN1WXbf!i zNrk}_k}7*zMF-K#L32nanePK`uFq)*x-+8VDO?C+5!=Ae=tGn^t0}5-y&YvkMY_u| z8cy*X5QS1ky}4}Ro~CSD$03#DtJ?t!j6K#U@|#dN=?+qORO-s#Y_x1jmBbUGnDU+b z{QZO11CP-KNcB52ngqg8MKzoP+WNB_IF~n?(V`b&fEN}hhAlhtGz7#VVzn>m&@p`v znT}viaydlraPDL?|C-o4=1?vQ%-D@nAY#L>YDdNGSDtlIL5LO+)aA)LyijTYlph^N z<{ny`|G?p7&BrL-$N^N_3NhH9IVN5J$n2s_EjyxC1y|4p-O}&Ltw7N&I(=ucF<5T# zciL-Ix$wxV**G5z6Ez@7M=8-sL6X&_cSrS?;BOt>{_7Bl%|75YU2ZsRB+Xn)!NT~} z(es@2Ca_}Fg>`d)&y|~};?fowUwq2DnziPT3*Bqy7+lQ?$9tcI^P7}NDrQ3h09V3= zAMj5CtsD!3ZdIOWY7@uOix9Wmf4qZ->ZYqoN6kZ-w9h;QCgspCzD_Q3WjTvt_K&`i zZm>p0K$KVDzwILHU$Bt~&Kg0NYnW2uL5XLU>7nYA0kuZzTqcK>oo8FjyTa;rj||Ox(?Px#9UFA|b6umexD7f=Er4)lwp@ zg}a6wau6a`_@Om>YlN zP->44nDRp%fNCWAe)RDLHAOj(>?`4RpoIahF9EO*o&c2OHQ?N&zyU{T97qxrL}rPf zDc+%Ljckg=c+A(#YW}p1cBsUTAxXX9Jl~U6+Cy-&XK8yGvoiFKJav9pDl~dBW(XFm 
zl1VgJj?B+O1}xY)-TW8}XIYpH=%YUdZ46ce?rE9!V90!?0OrO~#ex%!7=(N{xLCKsn-LuwOuK~=R$N9I$1Hwl8hrrsSCi8p!^vmSs(d?|eSDyHk0Oq>S@Gqoe3 z2wdRjKKlO;BvoHl8?`OIhy8kYK&)CTY9i(jilMxv9&$G{ZSB4I(*{RW_ei7>2KO)t zRhDTA+V(w6U%|LsY=tE%GWnhO#2NAj#P%xGJJAoXqE1MUprUo1=ZwOq*ovfB)XDwD^;Y=Fg;)e&Xmcd5e80`LAlZ; zfIL_AepDR!ITe>lE_r1FG$*GH2jn&^_~NmwOJ=YhIac@=vKK3N6Vq6-u3r%1{zoqo!d%Aftm=;JXv+?;$ z0Rn9|OKpGYPRGDaDj}91v;Eu8jC>Kyo+Y|3t~XovH&-qb)Cw*i*w{tR)ZQO$?^d69 zmEGq24$X626I^iktdP10Bpp0;fx8@%ocHgtV73P1O*DFq_Wqx6-PNZ7-5WWb^P_bK zO%bLmVanBNc6(X%>zWVF6)|v52GO10M?qHK*{;o$1P<2dEH(vSQCA%w6yrotr2nwh zu%_obJK5fhf1|0H61Iz7m!OEEwS+p^j>0Ea7xFe2FoU58-M}=MJ~EQ*M_&!;LDbFp z8Uo}eo3{$xMu){Py@vJ2C6&R<$hAHJfGe2T{?-7wjysB+j!a-mjO?EEj_hS8!FC`G zqI9MrOQB$xMi&@-SG_XArR4Gz8#E?239eTihNocIb` zyD?p?ybo0Wwj%XNCA`uSJ?Hrwqw+=~=$2v@^32Q<7(Lzo;!UEY9MS-RN@>%&X)%%l zGNiH1clsodeqU^f)=a zJE8R&;TVZ>LItB2)k?tTx;2SmK?y_?uOaBjvg@_3%UM8+`K~cY_U-X-ShI=dt8qY6>S)QW*R+())aFQv`K+JMG9rk-PT(h zoc>`6>r`x5$sYPw0$?0$iP$3hgOmI~I~c|ryp`To>B;c-ixAenr3&eS;Pk+R6z^DW z_J4kCS~7W0y5TdR;dDX% zAnX~HoD}ROi_}9Pb&&Ili$-`1So8AIOy_Nz(;iyd-c8Qj_n}K--`ljnBpKL`yh-9b zP@h+Hq6B-l#Zb4F#A`<>^GO%rZey#@?bRvaS4fB&4{nvp9Jr{^o15iDNs;w zq$lN(#5qd%6ebXUXCXc~eKg-ZDba~EQ3>e1WO!O$_$5?Ne8c2$PloYgv?(z{ZZ#I! zmZwucW!XS}zz(@nv@V^)AKHLcOc9R*13Iiv*J2RP#LuOcj3=)ollJ%EiWAY+9!h6~(cs6moP+hc zBuq3FQ31Qcg&=Z{1dk>em^ah9F(~M8o?7ZAyLTiZdJ^-d90u&h?DW`fW0)=3WNo=u zPlr&4tJ&XQ`Gd9bUN38m*UMXQv+lE=d?`i~b6AGF|AghQ+{Ndv85Uk>!KUg}1Ie&t zs9;jjvL>ZregDBKepsC;15xu;p(v$#{sey97%Jg7Ec3p`)UpO2uZm%hs@uLJP^+!w zaeC~yVaVJ{pnSTOhL%w!&HS}FuvMmb*1%~eiw}tl6@p5EaDcfW>3&&u$Ht0&3BfW? zT&o04Ftq)3vttlbc#d^KrueVr>6mWm(;i+IfG?q;<;DQvt~m4^h@y~Qg`G)2o0_O! 
z9-!M@PD1jYeh`=$#_tu|^FL!O#bC?(>1qP^4bGQBLihtBxU;7wdcTr?>|zaY2anjX zKSDxR2bat2Y0O4a+AggO7;n}79^`X=ZJ8%;Hi)g?(2h5{><{WP<~0o_?i?- z{sXTx1SPz?1oY$-=jLE)ad2j~d=EQrT49O;v+nO`7;9hffv0MX)orY(N*#x@RdY<^ z*lP@SM+;BINe+kxv^Mj(=2p8)z`Sn}G6tW}5(`;UWze=KVaq}QK`)QUU~9=tHeTG6b535b$IJa{7-esGvg>?(B&sLY z9><8>o`vE%5xV5hIRc`|Eyf=CA>LH>%u&dHLXb3lV08@ho89;+QMSxd{>{3YX#%h& zG++8P^EN_gtKduEI3KmDYFIhSs7znGoDi`8;inS<5C698LJ5P6xKuEQmD?LX{+qUf z25U&GMG=t^Wf^Kfm|Day6@h9?M}rna=n-%v{EPAr&kGVXnV0*{ukMMDRI>)q7%h5#Hc(^Cjcl7iVv{qz6dK#5t_+n1a0^LuVixZd)^_RBp(=il~$jIcdG$ST5FKWL@Gl;nu#yozoZ` zNxKp!J7^Vdm?G)n!;$6|=c@?CILpv= z?y(7Fyx!$9LB2Vqq~L+8#3(OcjJEw?hVK&-fd43?Bqwo1!?J3@vrChe;BUn3MKT3y zH7z6Xb1NXACa?QVBvlHsuPC9sv6|xYj}2vZb3im@)70`)%4g+d19-!gD)Z2(n|z%0 z=iH~}OyfcuiGif)0GvEFv?>hj zM0=&QVCYJt6+^4jQbscX;b32NA6&M#nC#H+d!+#`nM`;J^_3Y%30D(bvI)Z(RQYOA z*|#ty3pwUYWb#AbwU=s3w0VD?;-&TF5&5H3hsY;wjGlz=LJ_VKHQHs7*@PB{r@f0_ zE)ZaN5gem#ruK{I>y#&%K#&BI`Jg!!mv|zzvIrz18#Rh5=(ZCdtd@n;0 zF~ZYSP<~NSTH2^nlwz)T5=O_JYF}wPV_D{^40%3dS?9JpPNBIVPVDoOYsoU=QZTR{VuIub>l%xYbT0J3@-IzF&oV`6-Rh!e) z$Z$s>l6g;nC{SSU{NL(kH4W^A8udzalXQ3P(*J#9oQ@ZA16c!?va~&Vpe=rH*wI z*l&>;IM{BqPP}?VgdFMU5b5pP0L+(dr~RZC?Xmb3+hQ*J97yC#GIRxv)>f2-{yg)` zgh0Ryri~$qB$As+OgD}@kyrS{Sez~?HvlO%t+vEZhkO>r4?Mhxs2f7@t3nL-Frf=1 z*-^)-P1|u=^SQv>t(Q8uF5i^Uc6{>ecKtOk3zQ;4+*crScU}3egJb5M$RzJGKl5vI znU3VQ-T2&g4uBv=86$E_NZFQ^szF4KwlKX#4suG$ z?T)(1Z|T2QSIG^4=NkC0EI4KrQ;LNN{CmiR-nLS^ZPe1s=VH!<&K&;3kfCcTBJ4M9 z7>D>Hehd6(eMD<+hZT3F0^P3Z@}X1s9~F2a$eJ-mIN(4C&VQJQ7i zn;u!p6RR+zAxxY;tC!{DV~rB@^8tJGZAWu0r62cPPy{63ab(1qC2sv&3;^v5a)f+o^94_* zT}JGEmzrfI_I_@o@J4U+5vY&XOoLi!;iz?Zh&RBS1Z{pyPZ1fB`*VJ?WmB*d)e?E% z7s6r+Yb>Q2fAmTEs;L;cn$QG)n=d1@TkQ*kFb zf3up58-gBWIC9WSPLnG@)iq~(NURM3h5O71C}m5W02>&v+}f+@*@nbWrRmoXm@(Dr zwlJ`QUB~63WsW@vU9EVgGiE{DA^6);xrPCd7iU@IqIQM_tZP!y;umS6X>P+QsVh}& znY9|`k6Z5={7f@cI&tkbKal*vJUcBEM>p0NJMlfy{nt2Cv_6x4@f#!~o|6^;Xm~4C zb7#VMA5S3jTjDe)?h>l~Fy;Lt@p5`uAEY*DI*o)T*Al8pxJr#@XwMw6Yz=axYNMCQ z8!Cbc^?oc&vA^>lqdt)zhD<;(l@}2 
z-H`G?J}faq@&qYSkS+kECG3hH({(cT(>!_SXG<4$zH2#93sGe!hM3 zSmZ=GQrFT%1keIao2={O+A78MW>aL;CT7NV9uc-S-V#=nL{W@RjqLwF)Zzzj<4<%t z*dfaeT^*^MD71)A(@tKOh6&Q-hNRm-vvHxg_xm`xEyp>5=8WbZO3YsQREj|eA1 zerH>g8nf@h*UaxU&B%8Aaz2RL(@3E*MLuI&chVCF2qtI--pfc-X|ti(b|8=t6k1>! zQ9N9;zC!s*w;uWf0qOH!AqqE@nsIOFs#~TgNiP+o&-f5Wdm_`Eh1>IwTxv`sqM^gY zIMXp`(+g>bP1uK}ZsDe4h}9m9#5k-BQe<(Zstv1GuWG9c@o+Zhn$#OCRir{X8-KN^ zV|`j~kO7N)ng%vWJG0KFCQCri+ZH2Elml}Anl}r#ylz`*BJ+VY>-;#-yvWX`ZB>sj z>%|b|!$+P;XD#E;o`@3~jwd{SSAOJqw)0=H4?SR){WwhCNTNf^)sypdY?vMgpG;Ml z>_?Xh9=DEhzX*<_)Z-7+l|1v|+hhI@;s$NPYTztv|FNE}Cc;)yFa566YQ)5@{7|8L zf&?ZzH@pLjbA&1bxpa51n121k)Kr-vOG9ZVDQYWCqB&Fs%5(SI0Y$gO`0O1ZXCgDQ}93R%+WN9eW`ZRs1mA` z)Kx7}${*|mbff*OsX6tWt7qX*4>Q=X4$e|7yBnf@8seH&S60L@fG~qTwwiy2t-)l#r16_ zY|XEU4VOYNT=RcXzgln9$ZcZpbKgc2dt~}aOh#R*;=@92yE5kUH_T0MeLWAVV5%1I z!Xt}CQ;f$lK&tZ;CR`O6QR1lU7FM56a
7gwc)3;1(@azG4*$Ow9QvEC*p7z_)A zh}yP-t-^dZi+NP6o1U#svwY4QJ9q#PK`HpQJXlkOh?9UJZbv_e&4pwcFdx%`24HxB zw^ydPNV`PK5M&h*@5zLkRM-V93k+g5G0j4BQlQjF*Jtl>b)uR^V*zPNspy ztOTRat^t@a;K6!_O-|ouVBvU3su6zQB%v#qmL$I;T+zLu9U5x&)y-a+HWKCP?vIsa z=D^^-F~+x=K3T->y8%S~dYV(O*ws{1yybtYfYL;Ni4bhb60Z z^`#2aTKiW%urP!cBp=yduLbs7{PWoFI7l>59s7RFbY|yQ!O#m8vn2RQxFK<_eH!;w zYX|wywMliX2rE36V#WQNJ_>pJnvr5%xck5+*%LNhg}Z(0_D#9YxwLazL|;sH2xnau zVBWJ9$hu5dg&coPvw2$tSAOgnTAoV=KKxAr4J^P)-DQb{!@7)J)Y*b~>nifm|UFqPNE z=lUCF!8L$pS?Gx^h#S0@SC~lEN1%xSC&97h>d4Pt7loh>Vp@XXkHPyubT$TfXBrL# z_$T;9PvH1(*r*vrlo)d(kl9+dkDrSR;LmO7Y;Y1pmP?|#P9(%%5PKC>O+p`oh z%PHi{-6Ocv1W-T`YQL4uScYT^ayr~0>YOC+*huf|#;=*WrI=*(eyka^gWH1+3vX@YQ z7EfDF5;jsuz$5?r$=@2vN;S|J;f|G^##~GK`HYL>G?Bx5BwGu`(5<1sASS>Ce3Wg1 z8W)EDhrM4SSc@)zj!e2Ak;tBSQIttY7B}7F0L@VZ2h44*0}TNDS$CO7&|*jELyo>Q z;z3gdel9XtV*|v^&ILHddzxf_EU&Uy`2xj7ZynkrmDep znk$nej*$UP(z4qH7DK2j<>e$x!%jC*H79F_QWVan7nJ|d4eg84HDZ3$PKQWx3j*#P zif7SKAX<{j+jE_C4qoxzrWx>9?Qq8L^@W)o0=NuGhTFissH9Yd^A z;1ow!aUn@}P#*Iwhr2iS3&V!5Z^8E1X&MpvO#TUHIybgqZsJhUE zqf`L1>W^hV@~m&mj+E}Y;(i=+1xE>hGtG{#`ZO}et-@1uo3y2+7x>v|F~=OyJ$`Ug zQ76JyIfT>8>kiN>YJ?CG4(?pGZGW+fszbUYs=mY(7^=~xx?68gnoePKmW;W!i}|)V z`}h!~P{Or_b<-vKo2VmH8wd?j+d7o|X$NO2fSC|nuuf&Sz%_)jxxLCjx5bWDVWjSF zmk?|l(xrZnN)k=s;QqW%+aY4%WKo>A+_cWLp|tmJtdO+96U(vh0;J;4U~h#>3NUz5gvu>xZ(1Nyq=vSE9(ECu zZaY1Jux%UK&j7(C5F7a|{A_mfER=WP|K<1;Fz^=&ye4hVT@}Y5+ByM7LBf^aLB&EN zZM$n`j{l_Rcej{@^xm>zD!qIMKON{ z;`WzRxr$;v!YQa!yS0(>TkLLhZ9;tz_qOI<5$vSGvm-~aTUnR++Gi~VEuwNtDXE9o zkVoK*#fle68h`)L+x9vqlp$#xpjOT4w+e}Z>5sdSxXJ8{cS2$~S|-oyN&TK*WtY-h z+T7>?CAhn^w~-r6sz!W`>CPD*SOY)`B=3vVIFxOSlw7t_le#+`E{aJ;t_gxMtx(x} z+f|ql)|}sjCsV?vzZvS`G^_k2aT6140Q>}@!krxT@T1a*UhvdW1~_5W01Dki(%)lC z--wWUYK8u&T)Iy@Pb9^YrWcNfm3Weq62Q(zU8#2HN1{SI?jv!yp<~xWDp6;}J9BqJ zODz2%6JtVqr26j7`Jt1w4}x-H36_2BZI=xXWrGJCv)3ZC2`O)!__M2~oU~4c?I@{Q zyH>0wTg%?>L`UQd4WoR}CM5kUF{ArPZ%|%;cX!z0p%ke*tyN4Z&V_&92(zcHPhKic zGM@+K)+t@aB~xNix61f6zHnv*blO^bmHNes7erJdbtqB8blcRQQ}0Lr;`G20@k%K_ 
zIA~0mp|o8RGGFluB{4CAh>NB3%R1&J2m1=BeE=cOq~@^RJaGNyax~TN1+asKKuF<2 z{^oAdS*HqeD4wB?5DjcA5oN)J(joFG{@(Z66Lf#-!=-Ze$XRE=mWnS63>#YD*|Z97 zqzuCx43Wkvr~>$Pe&+zFX_H0VJRN4)mGe=flqPgfi38xJ(`w9cIrzzh)8*yp_6}08J?Dv) zh6XupF<~D6(hZzuuFf}st#=b?EyoGeUsF_2uK+7R)W3xBQrIO|xG_=a!SS@*-?L;J zY}3yjd{}hoatx``&Kr$<;rrrmavRYy-^aZbzH3soSRXE&1Tg*X!5%fF2{kB-*@Q5* zhl~$XA=MsWM3e$`_xZb7i;|{G$2ulBYrvq}xgC0JHz5-m8Ll^{bpI_&_IY*mam! z^1RPx3q|0 zOWhaqYX6G_Q5igrbAr3~NMd%Lozax06eQw6m?QXul8~Px4bYqPyrnYzY8VH-mGf zIm?wsnixBwF5yNP(a}oWdSFv0h_ggI$wKuV#DhR&zEtr6tYM!<5;bR%P~DLc4K8X> zLl6$jCEzvvfgGtRdVv=8$!I2gcixBi&nyL^mZ2iyzd{}x7I`7cTSE}}cJzK)OjGxz z^#T$`gIBLnmJF57ZP0{Jt28Q5u?(9Y@A&yz3*H(;be+ByXfT@W&?^b%Y`2bSjL6qk zHO&#Smy_2&W2Z4f-C=(x((q@!E$vj)-ZoJSdEk7Z7+D{v>OEoTx$LqFo zJ=dWVB(s~3;=>X^-&?y^z-Z)V?EQ29@EULIFqmhl|RR8Yvq?ZxdPycT;~ zr3+p_LC148>EYgdo#ew(wJ-OIq;DNR!@!x1zA^pKzlh}aQnDHy z0R@oSP{5Ip4(>cXNKo~rJ||>1vPSw;d`SnuDX^}Ok%Yuowv}PLCr@T12fb50pP0ZOj*h{<6iKlc=JQ1?lr1%j-)_ z^M~R^dyp(b3o*bRncn)^XvfwimLw>*#aG~}B!YHNn=!|m=Fqe(x2m<=w7-D1?94-Q{k8{7j zN>8f$oE2wfN{>q5=#8lP<({%@bfS2|#MokJw_tqlfM%F+a6&!!GaR0 z4j7#giG`3k9t#^0a=)Xv2SLP0U#%i2nr;U8rWwdkCLEh3A(-TBUWETp^;mm8^yA0# zIMbaUW-nrRy7zBgJMG;T#RLBR`w%h{6_}#agfJT@E`qtmPe7-F$z0keXO{GfUYe zx55I`m% zZd!-UTX+`Ng@o#&7-k8-?gIA;6(d$z&+fZ39c8{8D}L&of6~k13A}|#Rc?ZyQ;EfW zEs?btITZk6aOV66LHAtd7_SPi^tG#aO|iE=73zEVYQ(m0dE=}C-RB%~EvKki#!;sL z;Ok)MydXgi!xVyAA$_Pgb@R?=GpIOt+x+`2U_*dz&#>R3GUsfQ zHo2>gp}Ap+$-0ua^tp$ldmRV^-U-17Wj!}Y-4t&UzT0ts#t3BaF}?bFVRy!t;fllC z{z9qXkU%dmBNd@Tr|vacYWtTxW1mu^O8j}t9{qp;}KBSn6_vjTq9(xY+5!HIpR zqB5S`fYAAWyuOjq^3)-rR-izt_5<;)N;%Ft&5XGPOsm>ck;59<8{VT*kcu zgNULACOFwP#txe=Jc+iQ{RS$xIn$lJ0)gexC-tZW)t#?n%DO^m@fS%ozqi1)Fn@%eV>wy?;a}DD0zA_Z7QK^!(i+C1MbF^f{l_ZhE{&7f zqW?2TVMhqRxk~J)Y?ZwLt9Vg_t+LWNMp@m+NB@=ze`Xz3jl`@8h8+HnmU^Z{=#1d_ zoCd}9Hl`6>>hwb5qeMppRz3IG2|&UxYWid$m?(&ZTbX-w@1JDm?hL{+Td?P^hUg_> z+w=IoxTkyiVz81b#R!!W#nvTUmS9ZxwztQ zy`lAewulto5{$-jBUeqjNQvuL<-Y#OjDCX&5fS2+K;k4+VDnP%>7=u#?%shSUkRxm z@!u+5y))oe$iU?f-}`UpZ);}TJ?Y(q6j7_v`THMV(~=kHKMV;oquW 
z{MmT+`NCWpgGS0oI2xwc5clxW#2&7CT#tZ9ZpdmFGG4mc5Fc|F`{Wka70xg>`>z?g zk0;cTh7hdkPAR=}>mCB=+ij{VCM*eOonoQ84e<)~fsE3I4N zQKd5h6y|}b43iEDnfS{}pV?e%mt+rGm*u3~QHN2MpJGvpYzn`JxEA49Cg&sEkt2M( zF%)VlHk$u&IWFf8P3gDX2b6e$E4YagHC1Jliq_b84zehZ|AvfcxSWZX&8LRl$*Cqf z2vK7^N7J-Dq3@1jULkRQ3xXtH^c)^GcuD|sq2R?ormG+4+lSI>)S%g9gQ2bZBs9yC zpH;obzkr~quHAqn7t&^6^GrQ+ZkS&iCD1MT5m0YYr~qymiJMda&7%f|uX?p`k6`%T zCRmZ?#O>Q%3;35-5yL1Xg<6kiZAT$I`4!*j|0(vcCu1-!sZ=*e!u$W( z4yEA~9ap~aX<+FF^I2O{YTn@j5?T-Mz+rxc*C2`4K(@~WPc5^?`N8OQ6wpJ5QMoON zlCikSfdgTK`JjG{@A3`&h*xNMFSZo)gFogmZ+=u$XcV%+>U#!v1&<`7KpW&afby|fFXO$i*Y7hQIFp>pVRYix^?+~_pY}yt zsk3tjz)v9~jd}4LHGL}Cmb@5ra`8I8hEqAtXZ2{UI7?t3Zgw5=!(WjhnNMXU$Avk- z&CJ^prx(wffiwj5gr@r~GwbBLfMd0D_CC8})`8@#M3#_LK>`Tl_rIhrl#n}A=OH*^ z6%-!-sBy9mNj+KvS(&(SsOPbc{tZ(*0*%e)OK1W4A6NYO_$^b!KR3+5(X~QV)F>=0 z=d#+BRB{4QyvGY90e(gSxwh>lVIZun)a-&r*m^XZXN!ZL%?tZ0XTVCvH1=^cTMmu) zrN1+&qC*N1Enmz-3Q6+@pHR~58yWxa6~U^KtyrYR?HyB!8{zmy3Oc@prCuZaSItX3 z`w9o9&!yyB6D0{u2ztj*MWmRN5%_p}K^-kUFck@p$w82W7V>)Plf8+q^==q=BVIZw?ve>^7(>}GSsQBiZu&%u%QoYr1&L7Gnm)x*#jY4AN>1aKk6GyF>{>_7WcKS zy>${=eTDy$-ZuujqSytdq3Qcf1#YK#2xq{1NP@?1yeGrnl-A=mj^`Qvx_w`5M$Z}q z1ns?xVh`MhY^Eb8x$R#!454MxWapQg#|zNY_C$lwf)dt5VTVF~TkobPxmjG2oxo32 zO_;|=416{ruTy01oLte`gEY&@Pp%N^-<_}Pbx@T786oa3I0(zv;M7^~pD}Xgci8Jn zsz>0Uii$muQ`66hP(o3mo=uqLAk~ zugy8jHaO0!WPe>$SNH73Sv9Wub^uS^wXW(h+97q3R=K!fXzIJVFbWx1SBO5DjuMmyi*D=5T?krSwG5E`e7(=-KJVV!2CreffEHY_M>x0ae zLV$bukE@7^;3lx06-O~B19hOBN6qOw)$XwYuw@|A)LLK>IjyMvM)Wn7njmWudFjPg zI|t`p_%`L4KQJLoJaY0(u{I>N8v0Y!7J-zC;r!ZDP*Ol3%H4sRfm2W{b{%7x4Gm6v z)tQgB`;$IzR`z13#wMXA#x&&#DT^Zi+a-7I(>PK5zocFzq>Cm^Maqm1nywyEg(5(uvSsh56|`#OCgYza~pqn9o2(e zU<-qrOC8@iqCT#g2$Qew6rF6X8BXF6A%kaW_-ZSM!bIOlN5)|=MZRIBb zp=m(#$6|IOyS4rm2sC2(l>k2EsYjzjLZOddYiyt zpAc1ut#1pEUa2a#*#Rx;Rjq979E*jEKH)#7_|B!r8+tA1Gk{AoQ>cJ<1;dD}-2mNK zYr8%*o-Ywg&*Wbpe3NXlu6J2axBA1^u->SIh6n?@6@o;YXOcth6UaUxaBVc{VLAH7 zIFxU0;THDnyq+W10vU)vA{H)YeNaC}_x4Cf_#n7_M!;-hWpXA+lWW#kSfb2wg& zPIDp+`jqUMQ80!8c>x)x4GL7x4fs71|5F~ib2+7PS}KO*tJ)#-x_7)XZ^)TZW=Y}D 
zG94SMmjQ~^E1%=u5_*fpAr;eV$nXAO|8IFf1>=?fyJl@lwp2}>(>@wr($p;Nq5ft5 z10^;t4tVEHs4hI}yc>rO*vJ=5fSm;dgLM!hSavK)BbJb$sZT!kJG=vv4PLkePv*0 z!pEp~3l@Al-?ZSI?}<=lhzn>QAm%JzWCwTK%b#f=%NDYxZ3b)d;<3RnrK#Y>fEMu_ zfzvan5%DcPfe~&q&>BHX!;c&zw>|<-EmzKzsrg6FbDy&c9wS4G2bjzaP^nGb+Z`6n z7sAMDlpm~3DxE<~)AW*2SyFolt*ap6NWM?=f%M0%&e-!>KxMFT2HpF!%` z55y!gz-=q-tEe^Vcd_lTM+UaF66IQGbprKg_a)<_GN=q@(ju^>lx|mg@h+CV^?CGE zpd_gbhuSs=?0d~g#ax3~+b9zZYs-S%OSlqsVU> zF|DKzu&)m5rOoNZn!TAgwSdpr>OWd)qGz(WbFH1K*Ls*w6u47v_z`&AK|5Nyokv?D zMRue*x+$oW8{lcHwbAlw_YOK^4x>56Voy@dizcHFdUpMax@G0R>jbLvI=6PhJ#eH8 zNF!o1uq)dgGntu#70{Nlqs4wo0|p~Fl8}^ zUsOR}M!9v7kp5WiUGGVSpLDM84%jKpE9-IG;6Ls)a6Vk|nW{p<*{mHFd={Krl};_; zD*Ui|q5Z^qK+hy)VXUok1*Cl$8dQK#FRaVY3~oqRl3@(Kg*@a`!G!_EH}%$do+8P` z{R3A*^O6>rBx(maYGl4+{$*TQf2bLj@WPN##!&Wwafp)cBl~sa$Bherf|y&<2FHDT z&K5BgqA4nO2|oY)bXKnlQJo{S$T-fV-!kWKtdiXDud6$`zJ|bfO!Vpm z0huDDE$pm>3F5cy$Md#eY;7kR);Kv%Wx})SPQVDl1cPyzt5~v|UWe?cPdULD9gyn+ zY#iro7+shmiIyU*C7MLDCx@JnNW%v4U3%ncvhF0V(G-0IW%hn*Y?9SiOPIs3PuxY+ z+axuro9=s&p_dSmgzK$wvP8LLp_ZqYGvV-@8R4S0E@wavNdOQ_G}*7!ZY1Qz(7~32 zsR+dIUp8r-3Z54_ac&r~S@Gp;&hxIV9EYPZ#^R|3g(_G9AzsL)n);1vv6{drKs<;N zL(!s}Q=Wgn@TPG8ckW@?&URaky|=$dpelVg+{K41#fV$3m%Cqd+yYbdg>MS3$EK5* z>C!t)I(Ya!Y!HK zLd#8oFCi9pd0%xjqZbWlCY0y))LVd$g_5)QSHVS@pL9rvwV?t5%+Lmdz8IZ*#oIxq zssOi4B5Bf?I{Bmch-u=KF$q9k+Hr;eVv+Jffn*=V0s%P2B0>06$}wf*FI}1O)x$(T zgkJH{74)?u^`!(zCX9=L?HKqF&;5iOGK;9$QOuOO7Ay8Wvyro=~JvVMVl~6b$lfJ7~_fg`u$xy_?<1hYWvi+if>E*kFtbV@XO#s zq4D_4c`S!fdS+y?P(sVkqqP9K8B}N1B{PD0M&Ed=NJ640^p%$;Z3_#M{7m91iwer8 zB_NzzJ6Y^s&|(2DP4*B#(5b52TV+?6rr!nUi*+bp>PIP`({VNGy*Myq*u>{M+k%rq zyTsD{h%-f|3xRXBf!G~P=z}H1R8{tVyb-m{N1=F_S=#oue5LPb`0vSUScdC9TPK$$ zCb1w09AzQy7z^C#0*No13F$bUP^GjNwe+t7oZ0qddhAdrI1o#MiY^2-Wih)<%#mJZ zq-!YAyFF$Fu7Xl<8Y=%b05M7Gg*9li6jWF+5Y7tcgZ!QcfmSNhV~5a+3S>Z{k}3g% z1W(2&$R`K&>%}~TLK}j~?aYP_6c*<)>fTM2`Zd?qYe`HfwATE<*+E5{kGG!y>x0n8 zZq!9bqs_jkpP^X6&c++>Q=5(ro$U@4$Nb2joSG_atQ&6)pGGY`VUpXokM_8RGvT27 z9S-19ORoRvxrxr?LmKF$>9Pthi$P_|GJ)0SxtNYCV37BBs&wWPj704Z>y)Bak@*or 
zDN52N?Zx%l!qvFe<)9W1Lo$Q7lVA0h-QySG;tG65C=(>>@e!Nop>URq^i9|H(jr$t zwnWGZ0WC?;LjRo&{JEp(7D^w3#e<8k3v>fI;s4F{()q$nKkkk zZ`Yyt{ItvS6~T3iZ#P8hlW7Zr%$CZT9F->Wt(bo_$9E^1`W_lbZL{)9!+g2x60s0% zrg0z8K~IB6glxsNG(Sz*g&MfoAy^MMKqJuLv~;T%0#jw^=-5+=4}jxX7|$8~-3*VA z#MByA)o05aR~j&Wvd{8Z7(n;I^)PKF8q9jNcErti_2n^h67d2uFhwi-#n6$d3D!l) zox30;G!&wuWPji4$WxzR^s#BUyMtVKz>))flybh0mHh~tIj1IMjUtYjZ4yl?>fp6J z?-F&yNjAcMJg3Xv*2*tAPcag}g?CIR$ZsTi;#p3Q7=;r%P<5kLFCo&BS-@sm?(ZP~ zPQJ~UGD-*Bb1Wac%>RXeOxcee)}uv-yKg3C%3Q8UVEseME;80#ySyN4+vPSx%}x{b zeA8Hi;QSY8o&GXV{DImZZw`+#vsUv3jw60s4>$etqPp+SDiV9234hsiasj~NS$7gRc?ee-6w5yV$!6DRBY4J!CNN0+dbx7!yPS+72XSIe z;vN$vEW+kSpkc*;=L@rtoBHfWaNV?xJo1)PPiibdnsa4Vg2?QCrqyV?#ZnbzIC`mz zz#hZG3pEvgD*$NW)wq8@9OZT-)$njH2G1t}O6LE~?d;O2fi)Xp`gz`(*TZAQkKXwv zyKttHvvMh+`tjUil$hE)H>BBtXySq*&F`aW@5Zwj?IgQBJW!Wtp=V-}`ZENX(dR5x z0b7g9?tFE{esI)#g{}Nc^OI5QirZ}u)Jx$>DjxZoqYlJq--cPOnAEveS0$y?@QE7| z_9*131whtX*{C9$7*w7qz_n`n#!(*W*+PH1l56+9XdjTYiw`{P?~>H|!t>4_kz((H ziE^T$m2uqGoBhFH`i+z$W+(s_P8D3U$JsI1oH*D)Zi*ueWhzNSX3PG!^{g!_^MU2o zg(V196=CM2BcrR&j5+qkOH`IfQRY{NE}F*iHK)776LRE45A>6;vAeOe=bY~`Ledlz ze-ZJBoP1jes#WBjHpP6ZjuXL*%IPHoL`}hU_cZWVDG$UU2AH?)R$mO79OF(rV#nPC zP5L~q2QkdZlMCgOGlkZ%P`Ro3p8iB##W0yF3gn?$yTKV5ywEf7*R1fIbd=93^O-I= zI!ghL${W}-9B~Av!I+UGrjgZy`-RT(MV@;UAL-*FU}CPG9_dHVs%{I_FWv%*2I#?d z&baTq7gG1Yy-pLNltc$|$DT^i4FjXKLb<$iuBwDq7#kqIr)g4h!%LMAa z`3hHARMm^0&34{1^?6B95Y6nBc7+;U&d+vV1vHr;<0LsFlUw^}D4G+nkQZX`T59Np zUnW{WslNXhJhALD?}~6Ogl-*s~vBCD9zkNl@kWM!J}6`+pn$m zaIMw~+20}X_mw)LF{0z6pw~xFwXar9W%vfHBL%v8#6bePw@-=JDKBO_ zlO0#GotD*cz$h|f)czB3Cd^~RzH^d4!EUH)^#)TI(#w9xCT>u~XtTgiiKj_4%3P00 zhT=_n!jW&kxpU-FIi2t)Rjrgz0M{-?H4_xM7p?uYw>X*Nm<8UbAwHcqfi_K>SG=Qw>A>G|A(jE1e~Y~U566A>}GU{C|N zmNRD+6*$|?0~IJ%bS?O|_o@5Bv<6;wa$X~9)G^Qr&X*Fr5jBd|0#>sTU-7h&R!y3l zTj&My?tmfL_Emr5!0lt^fk^&=e^9cyi9{OxxqP#>zELvq|63%-|{ zXaLY1GPn+l$=;ErT9BzcIw5SA(trS85*TiEF}_crBwg zf~2Xk5oBCkir)AzU6P3NpU!!`ZB7s7Amian+Q_qnW-D`ipo_;V516qH^U{AcceMJy zoKJNU>m_5xmUNxwju2SO)1eJ{PL2PBKycK_ESMiwa#VWzE1tJ9E4|x 
z2`{0;#jxS@>LP3e;?C`O$FoFpul|Br5*-#dF`DJ!CGP-`2uWV;8GDU4G=n2pjIf?U z*FSg7QU@pAKXt{6u7U;2hl8ooymg8N2|L1?_O{>=L^g0y^Ep8JxYssRS1P@3PtIe2 z>W-XesQ{*R`a{+4O|N^@y^A{^SmORL)+$s=2jYV(O3xAfUV^iQ+cdeeU}0%;g3aNU zR+_RYsZs8H5>3+U34o5lNHbaxtGTh&U@fu+jCqHFayMk-u5|=Hbmx3-5@dYgi-d$+ z+}BLPT{An}eoWIm=>K|5VPM6&8;8NFP(~R3W|n)4ARZ>oO}L1!{McUvP{5}YE1|o} zeR!j0+ChgHT3a7UB8#1)wWeUK1^6Rj10~UHNaPkq*BLA|$%qtTTUzwkMIkce;C}r< zQ)^_jXzFLx^V3qm$x#IAytl|_PS0++n1-9IFWgR#ieDq)N#K+ZbW^xyo0?BFSOaK) zOPm?RWh1t&BNiPc!b2b7VAP!Kr(+NUOrA%;fgkSSIHm_p`gB zST9=FuRBe^NjGv(EtG{)`5)?H_y5S-4BSUW&7$o)u4+GNhS3P2NRJgl>J zWiLW!ONA+GF=VB8CX{i(qyLwxdmz)eNlN$u26lXg8D81vI%LufbgbwD5kqVwY-l$q z2H3AW38cR(FRP*dHTRuyzO`!dU}$CFuS=Qy9>3d0!gxLQWlt!L?_oq`{+e;hY-yr0 z(AgMgVq)pD>$}J;H%fRdOFL+niY||rhH(6?mu-6m0R*4DDm$ij0wOyqAf`bhpmUQg zPw4Q*Q0Fd>VON>p+Oow@cR}f2(0<|3j64x#ESF})ibX}d-W}GwAo}fmIY2e z9^X2|`+hHXFR(uJJSnRT+d<9NBY)w*nO0@PQ>#d9s}*o{)AgNs$U%$Y-gP-ybmCFJ zs`qJjMn7L4qAnK&=XeoPJIMv$qQWwz%wQP-~^p$yF=hm~d1bG$6CEFgzfYGH!-Yew4t&6NI-R*(R7Z`8gS0zt3}lHy)>vIN6I z_rL_SrcjA>6HUe_lkM3<8?e@U!NjKx{+m1^ez*uXrj#@I92C=n(FI?}K|0;*Ek6aa zdvjSN(f&SwiRQ?6?<~a!eXE+zAunqd9o7=&coQhG(HYnY?~@9+7#t2GHQ1nAcfZme zG5|BWQSQ(kB3ViBSPP7Y8ufGB6y#I#jItxn;IJaeyQ;*=DGT+uCFb$!!W?C}Q`35~ z!Zs6r?0xJ}pda=V1%Vp(*y`Lz;fXQQd1}X5s|tEGJ9c|HEra8pYv)f(L~aNYTxG** ziJd#;^qIJa@JJqWeL{@Xs#V{*08U98A>;utXSp7S2~l);?!*nQn1M6B?BD8w8AqUz z!wF#QYBgEKM)s`Z*3_F%x5F(X+OjTt7mgzLOwtkPKHF~}A-g=NdPmjMSC^u0*l~$a zsO(bqYRo1Q4@}6ey7>ee5qN`O#Gv;(%R?sR@T^An%JEi*`hv)%E{CA{1S-G{%?C7xrd+yOOwA&oi&P`gvyw0_|9toPeGqIOobN9Dl zs1>YU)CbGK;a!^=lGYQlFw?lLEe-dABWFn*&m0+M`?-H#dDV<`VYoF&Qh=Q@G}3~g zYj!NMq8a`y?P7BYxfAzVxSp~(9nYoqyHzVg7&*W(DnD#cX-hdr8m!4#+Y72MJtuYG zvL}a#wCU|qh=IKj$!?ZqU<|)1HjBe6Oi_<@3!aqpdNf_;5TZl*QtR1YgE$`Sa#TMrT4?DCP0B{8H<-PnRx z;zq(I#~PsEZl^4`A;DcpWMJ1a0Vts>8IxPg73;9qblohVIgJg7y`OE+S36L-{Gjq| ze8mckq=$A%0GjN$j$7f3T&kVSo*DvvDP*qulxssxR)?Un>xb)uGi-N~3c-?Ej0-M8xvFpcCl#aXs%mwzao$*}4H2^qh zM{*0wR<>R0)o))0V@B#T6ggVxXqOf|W-r^x@~d5oTB}jJpvh;$Q6KqM3-~(DJA;*J 
zF`G$Yg};z;a4Lzd%ATKAosU`x<-m|c?_J~Rt%^^2-0Eu|4`9c9X!3t&MICZ?Lt(E8 z2BlRt5uUJL_;Yl3M;B5@yGoe;GrtbwM6ptOz7i>N4zmqB#d-4j%TNjo@yg5RZEU_>n9EpyyRXFXeIPP91YPL_aWbqH zDQ2c&&2Kv7K%fS_F2S;8=;jJKi*sN+fts!|_VjOirwk)6=KsjAz|NzKr*C%5O=M=N zZd!mxk^Z9mqK0~yrVwhd5i%51s)fe_)_Jn;*Nlh1XtatO@U<}9e{*yba@nuLO>&I| z;{5qvQsZmPg}%*Y2-%*#CZ_nxVY&<8_eAP+f<~hlT21Vf!eoC5mGJwG4ptg}`tB2; zj!fvE=%uE_wm?THFfqeZ+##NyE=*>{Kjs>;s+xw9J4He&w>Rr&v*r=low5ACA-E)m zUapaddVAfe+NQmlNuBbBm8+d*%$kH(L2&jD|9q^+Lv<~jt-P$*u3NECcy9f87p~R7 z*kGIZ3l$3im2R1tBKwbXlYYOpVm`15w^0Dsh^1@zEA@wG@cBq@Oz`yXBdvX_<{{5N z7%Bp+iIFyo`iS;Gr|zfVkjm_V<6h8`n^>G8K*b2bIF}D;%cap{$0ehXrai^#$^d98 z3X2#pHT^p71Jlbk50?HZ%3R-omYI7ad=THCsL0~!52O%9;tw^t_s{ab`ujm;G!iBL z*Lu?VXf|C2>XIQ7Se-VQ7{=EIy*J>@5Di57ld;-n=rIiu)c5EzBDpgtc7v@JY}OM) zS6C(PK{0(u(PgNvmAXuV!&Nw3^7pZ))!P= znozsvf;qhk0FaD>nV9}ph>3ocS%W|4>sZPB$(8(@2E0qaA!T(#t_X46jJlFv)3z`sJi-A&s1iy|%2Fgac0>sqj=JiKQUW7f=H^X|L zFNf>wH?((C>QnMpZh|I{Q!J}h!$FoiBmNfO1pra$tg4_2ty!+(9Nv>Tr zci)G!X(`TwF7}CGgyJ@IbR|btFf`O-H6Y!pudzn*0iG z?!N<_G1_Mn<<28e6HzzOfmu3_*YaJhP7F^X)I1Kb zTBlkcKCP^Tmz3`CD7Wj4%-NPOkZPy0C)N`QkH!4H4TNI}osFh+7tCY^28rP8w)NL{ z1l~WnDVSc;w?)>+Ez|*L+EG~8Mn1HzWFi0DoE|QeeJhD_Hh#*n5oRiUQxps>4)+=gF4Ma zrB#nB5wJUUq{7j>k5>AeYoz#)ba%G~2saB1eKh?ECKf$lDB+OTZ&XRC$qF7 zw*_%_J=ocS#mPB4n|Ezg$>dwoWoT~ek>OQR-wsPaa6V!h7lR-o8xsUrfKp{RCS1o# zkPk$oOm+JOAVN-NM6(D_se7b_NXANgx`QD$AK9qELfbzrWr-icPJVf4%M_L)qkj@Z zPba#3PSSV*%^nEM@iz<`)b3gh4q`0 z=(*^&yk&zmh1y6u>N*W~RW&&)ai$073?swI+|EO892RK9@KY2H+gnrGx%jD00{jzK zH`zh914)6-H$1$%s0Y(x4EkVXrjV?S*{VP2kuJS6_NIM zmQR@?*-5Z3Z1J#Yj^ocoDP_;*<8&0D3EeBuHY&;_=8odv48!5GkuXX|A6oeAr<*^R z6nvi)>~+?XUT9i00b4CN`-_{`(*lMp2~Z{y>3ywk%n$IFU7FF~$2iVcb4v#+^SolM z%Juy;p1Bz>Ur?LTZ^`!)1QR17InnSb2e92j693aueJHh@d+=I6Yq-iv!~*-(mx?}P zgP=PZ*&)u(Qr!_q!dYC-B* zv9(PFGHW8W-Jh6+UG=xkCwLvrwa+M8T$Vjor$f!Rh>HL(1o(gE@fzkeRALd(*jY7X zWXl`WAW0zMV16%Gx@7jJ{gx0cXS+wK)|<-J<;vot@@miC6NjGp>}H63-?xQ}kt0_m z>rhO5Rwa&Q7u1EmrYeK;O+acV8B^Ueg~OU@QQ;)VoFG+2>U$*jPubuYw4z*`|0?Q# 
zmE7$JSdZ=*#NuJvueE?_2P^Dsm%sY`xk&-X0lHX66{Z0&XRMz_9`E>mo>do7zNpyL zJ$CbPq@n+I?|Z>_F`Kz(C7Gs(q*jr-B?_ZgIX&?HDJY8IhN2ZCwH!-JX9L*6kM@P$ zhkx+wN|5#W&g9@NX>r$B{aS_C9nN8R*Y|VHwizOF!6GDA2A`Q>B-1TZ+S=4|*ZM?C zfi|-gIgDaz{j5U;TN%1FMI*>EG!bs+>8!vPbLu&JiYGOGr!#+(j&)SR668f8CM!&9oA zdhcf@1u649pK+N;e$`eZrR^9ChVj}`co6MnK$V4^c6a>+YX4Z{gTr|#7&OwC4DXET z^m%K&CaEE+sN5uR1K0T4=!&A#3jGQz3+8H&hwa3}#s;#2hxi1^V?1RF9D{Fb>bjdGAOPs%27w5K=leR6s2EA{YmMHRIP7 zdeJ1TZkm8%9qnH`c~hTYhvA(EPBbV*?^33zs*<>Y_b!A90;YKLziILR32T^Yy^_S? z_5en6`p`o}WB^?1(W2LwC=Sgyhuw)H)uXlRod$5~Ii%G3KO6Zq$kz1j($UX}(q_vS zAmWDgdqsp;Ke^Z@dsDf04)4+#FqZ0H0?Ke%pIg930zZ1_CFDv!ocG6nA{JEXfcY{D zJy)5fI9YW_?;j?M;{#;6IBju;jdU2j3-6!4cQyPdgrDJfHC$|d3T(%N z8886vwjl1nQtM@3N;1if#p%mg2@nXmmJ8yXwZ*22<7eCaUL)8F7pNPhPhjvSIsU}U z36o0_?dtLrnxhQ)5>tkUUa@jyP$erpdyo!lK4ao=t}_%m&Yue}lJ4fl#aHX|)^drM zsX<>!qPd;(gBFd7qb;DkZg}2Bz8-L7cZD1yE_?zU<&%ag_-J`%rey-i`D5Bnkf^6K zw4^DwwaQr|qI5ql_-BSQUYOlP75cSd?P=-jW3@)ry(M7G-`B_>|UKO9+8$5UytD#e{MO0 zaOS5WwQ-NQ*~UlQ%Q3rE@eeX{D5L})9_8@^Yq6S1S9N(B$YAboiv}T+u*eu$jR4dQ zN9r({0@i<|dx}s=BY>pTG-QC#z0kZYUJ;z2-FedW7X@lv|AOThv6cS-awp_wvq+TM z4o=b?^g<@Q8Q^H#oe4K=(S9mav{BY0EYy}^k6SL}kPZR+nK#bK%|zrGnC1}%MA`=c ztm77?DOCeNJ|cie-)7_1TcbhR^-#fYy(1oqg||8xGJukWJ;b7UK!DYP)Wy=u(=c`? 
zCRR=;ytwK*1rxlKak3XK`z}Zl0dEH&k&eo^HZ4Fr=2Ux$EgSEaSd=?t+zwfsBRP*{ z&>l=Bh!M~4wn(hEyuZDEe)}hmX04sp<#c+)4xX|JhuREH`_wa{IgF`pjjY2rDmjRe zdEHdI7WwyEj8k{nR!R!2JtJEPdQIU=ytW{^yXR1pL?N}L3ld;en@L+_-Rz`@guVfo%W0MIU zxHzb{{$gqkc= zGf8xe{crCADXLdbU`72LQary8;or=xNe?+X<o)~KX?J8ur97sOBj3e&@wUUtURAix#VxA6UDoaDHARpy>zRTR+yqcH66wAXgvZXoKy5|8F)6gVt#bzaM-avvC~W z-eBl3!lLnJGMb(_+dQxL@1Db>Mo-%sGdfglG1Yx48`Z(Wzp(b`Z0j9@q`a)#nRt3v ziekD)-`jy(3>H{uwlxk8$A1mq`Z6C=uy|H7{8|W>Q)Z@qK)C^w*%2p>N+va{M( z)}y`%$&ewI<2J8pp&33}{-PK(d>8i#q}%|WiVcse&=L6g4*ktn3=CO7Tes+O`d@H3 zd)M=`zc^1&3IAZw!w2*fGU)L^_MCo35-k7TLGEn`A^%a$fL+d?Yv&I}eQg&jF%Y|VRCywnFGaPjO>^~J0s$fy6p8a^j*dJonqidZk+VG819t zj#@o&U+z*kaL9oe{xkZuo2R8!qC#^>g=6|l!)a{wJgx7t=U_2qBj-Ue0JbC#d02`Ze``T@es(ZhZ17;JA^frK#s;f4;8Z8ncvsKKU5(lXY0MrAlOqnn z0oTHM%AAFnfmYnJaLX3RTOJE|?Y}@-01*~v)wJA!U!O|4?m~i9P`EP%qrO5EZXwc{ z|GHw!oN%{_H!kod=-$O575Q+C!*cKdm=)(70>aL?^5<%fSPXNkFEo(t_Q>`m<)x3V z>hTBW+DLGl<36hKYK5@gj+ts4gnN=yuDtoi3-bAyJ!_f6fj@_WD{JDeaGKcy z$X)t~7OvqnyjEG)>csZugf2hIf9}pZj@6$a9oPBEwqxH=86oI%%y_XFfiJbtJ@6A; zem!zgqmWK$Y+?*dS@ZnFtZ&^Z0uyhe0Ld-C|$jcev)2iCeA>(yd2@ zWQS{Ko)Bo}jcr58$F@>P2o9N1TDn~PCfd@#*K=3sniMiw;-GMAEEvtKT4?7qY8jtK zU14!B|9dQi0d16$1{DjxyE|kK8GGGy;$s`y%g%4Q6dPO*7Z5 z$d?-(h2R$`fLNvMKrg5(f9Q$Qjgy*Gb9EY8r0ga}#T!U9bm~1vI79oi3}MnK=rQg` zsHbPytEdI@?7WU}_0yIbc)$E$;*HgHP2e!pnrsgc}+@&Ub-thIgOfm&Q0F_!_1SqC5_ zr1sdd&tt-0E>OpA7R(sI#|C#wBxNDf5&d4Jm`QPO#crzOi1qWuHN*bGD_9TyvPZ*} zD=1Bn1|nXv(E0&7&_~=J(!>9qw(SQ+xxxAoA8tqUR6hLLpzcl`f>%Wk&6c(vFI>x_ zd!aw1{iVYlVu#La#aw#hD9Tm;z_ ztB$oz=ASHKkzS(1t_xvR->D|XuK?F+y`>ihhROMgikds0S56TNM}fGU2RAgdeFmfJ zune@`lRcd9Hw#kII66J2<(Hckl(r;jvEtqwB?QgxGd;r^d8BOaPLH0GrG`}DTvhOm ziOg5vmR4Z)`0jR^y78@!cDF!IQ93C$NG+rFp@Ew$marDh2RW4K#iZ4{mxN_QrZYW~ zal{aW-pn}QVBvXIkd(*EBhd+u@itmMb|_0BW*VH}K;Vxf)Nc)geaY1_0PCd(!M}*% z^aWt&f7__rvpB6ByUDb8VX|W~Zx8#zll+Z3x_EPmVAxPib3LrmAmZ%r<8Y%UHkZfoTOIgBSq++Bz%t>^$-%f< z+1`8e+)CUjoctuW%d-a3*Xt6Qt9r@b>@i24(97{Umwj#d_Gb%i00VRq6sb%qFnqw$ z7E!rQXIPD~qx-f-i+C=mO^u2GSnWM{N&E~-yd7W(XdqYxoJoSKTWrg6Rtef*;2I8v 
zlzPHe@J-xLv4CYg5avJ8L|cL>!FrKVz?|T5hZjOo^V)x+SR0(~gEtfAJ?3h*mv!Rx z?Nx?d}zeMP?e4j-b7Ti}>jAeS|v~{HlKW?T3*0=(kKlkx2_0Dj9m_@p?RxGr$>V zxa6Lhl%%i9FSru}0V@eOW<->3rIVI=O_|SER3EH@6JUEseEX}un)DiNNZksiBxgKQ zY-Vfksd<=oG?;Do4xS$9(Ui`)Tk!{f>H0J(C_rw=nByA4Mr7vXGofc zoTH$r9Kp#^VCw;9zkA}zZHXhOTwz(R z-EJ0ej}a1pJ?XRXUt8}R#bXTc&r8V*uM?^_EV@u15wg3#MQl02U23|@agA-Yvr`t{ zF+b6X^6bgrfKx#6O0ji*BQWU+s15eQ?9(-Zhwaa;F(rg23%Me5CCtuL#o(=52Jh?D zG??I%zs`P)OPD5DKBw#ZLmGMvcI5(L@&Fq!FeaOs;*g~tMB`tLey`cT`G4cj+0BMp z&#P8WaD2Fw2)(Y7R)~_g1CIs^+21JEBzm+Inaz|Qoz}uxDJ1O$*r-A12x@@O1=Pry znJ^oo^-nz~-)8G1H`75I<h!CTy@k=ACXy_3j0qLrworChY)e`0q}G>`8&9S72Rxs%Xd)eiRHEQm$aO@ zBu6QvocATrX1s(z3}#3Yzytq338jCJG$l2woKZ^H)x%F}P~<({!a%X>0#0W}wFlNb zbW1D}*9{{3DWg(j(0WLm#;zu%phihHB(61Wii|IEvWQ&R$GrVnD2m84OjMw5wd@Xa zzdt6%LD-=`L^xTXlY3~{)O*Dh(P$`8EVIvEwJXjjK5pntGqX0*_EF82|EtNA z!2vr(R17RfQu@fE6d6_RZt(6I^FRc>%E8WXL-8?3D3ECT73il+;Z=XeqiVrRo=`}iOcz6xk1%EbaCueT5I8+tm7`ucl zx8521TCISW0R|~WE6fxgD|d-78@wenSuMlHR!O`m)-4#Wd1iv#3XsaD|9ZKL`bFZU5}O6fM*apDWh#39Q1s20taDjuyj|lztdKQ z>#&XscApy%eP*wm{RRQn!0bRJn)?GGtE)rJcm0$8<*{o!9MqeiHSihe97Pq0gT-a{ zTW^u_O~Q@!AFDE8)7T(bqtnMCLHyrMW#mUM#j;_XwMoE|4dK$Uo)0L|@PLG$=_pe| ziPTM=>ee@vj$86)fz#^dy3nCB(!u&NSVIGOY_k66`C^1!;_4$w803Akb2I#~zSe>Uuj&C8mX8ZFvM+ zWs<+}<{K;y*b`4CqZ>LZr?4V0IfS3jfezF!N6!0Z!b@Qm@Iwnw{v$hVF()yLrw#5V zhY8)O!Zo8QZ7)71*92n0{;U^M-#sC>eWkE|CxIVS!>>;DHSSKxe<)^Nn{$T|u+21t z)QtKhC)-*UUNW4TrC3hU&1a_-yaj79lvW$V(CnnlBcbm#LhTP-?r5)-^V0?^2ygeQ z&j(kt=1-hxA(eS`?S}}iV9^5N^KpYnXUTC!!?2bLWHA-d%lwYazr~>J#0aGF ziIE*B>t{_h=^|3Sla%*NP0$exR|8+7e@Yf=Vri2|v{8&}sy=n-ta1+F3&Vux#}2kv zzKofurvh~+Oz5h=?}uIv{~?4cjdZ^Kiltg7$1YHmp9(bU@_b7^!}eXc*-BCl&=D9QyuaLfu6%+~F9g&|0n3c74gdZ8x25tKZY)K|16i%YB&VH{XzkSzD3*sf zO774UweqfEhOvg4!Hwr%OIizreS+5yc^kbBf-6`}3^2_}v6ZfJel*XdrE6M636P;D ze^PCgkyc4YS;^FL3!(Q&v33EJQ~WW87{%H`XT7pzriB5Y=6BV_YYo)t0$yN6`Z4E# zKy~}O{Q#$2TMR{KIoh(OPQz@hAa+uT;@9d11!gTC;(MsUsdvg8Npjuz!JJ&JWz4DP{d7@J}h4UwF(*=CGbz!}MO*0h$ zEJ8Qq9$eBRh+KcTk<*9A-o-rZ#`R`;5SynAVJy*W?;XPl(o;c?fqzGbsxX!aZ 
zTOEWo28>;6Ja9d=&;%I!p9BPK^W5f8q&$V!R+-Rcuf%@Nf!sDDLyo8^EbrnA7=rFy zB7d=b+14#g(GV(-%+f7y^rK2`QN|0@%JeC(4s{ohQG40Bf%U^_IGltL6{)T29TzH~ z!5Q1FO6*v))~QI?;A8|h^3^wZGlMknk3 zQGW44qts9IFu9OcXENrUYu>+(o$D&{Gjg2er$>8b_AT^@GeQC2?FgBY_1(pZ5U;~I zXT%-3)44Gl>=6=0kr@xPSW;tEEsO?_eOXMLBEPf3Pfo3HSwwXpIVJY!=I%X<$@c#+ zOszB?lWwWaS5O4u9G&8=bnzA`>-sQ7wMI%QuhRS;+lxvb%6Y4uZ81AV@YsTh2pl)R zO*rY*qyv*Ry!djZPpU1N){XnJ|1}`WB`5Be1YU_wLxQw`sU{gmDy6|$Vo*jl0TMW@ zgE^-2(;BOW6zdRAb6YIkLPC@^gg8`;CB5bl6x!X`0RiFcjA;rj{#pQf&B${9)>PVV zPGaK2E);uD?1|8p;EE$Vb38WKp%cG2klNp&qZS4=5q-w}%XRr)&4e~l{`lo3A@>UC zGEAM`J7;`b<%BRD^XLbP@bJWv)DKxjb%nxLP{dcAC7iSIHFjfj@~(g*Y# z794bUZ8g!*-s^G2XTa`C+4vDO^PKI9QNB51m*!ZFWU1)aZd_p{6OC!K(2+W@$1mg# zsiIJ!;?!P`nH+)Ixk9fx++^)J%e=tUWsRz8iVgC@C@U{VwCnSA>5rkZWEey%;H9R! zLxptD4NNZ#bGzBr5B!XJu-n(VRf%EG`>TyzK_A{J%hNN5j)GY#y$%K%sjEug?AUv% z-xA$oIJ|*POL`cVDBiF7e|g8D)!Wz=zK{&3zG~xL9xCips3Te=vS)hQ3XXHsUBFee zkJqRyXKUCS0zxO$b)j>9mbzzwM&FgRdBz&%ASx5lwiQ?Jt%@J|2UqQApgns#wT05i zLIVA%m&%A~?Y2itMyKokr{=Q5W`JyLRGedQmZr9l#7hwwQSz(x^N3%2LeeF4UP!V( zPrhCEH$f{8Yf#p#>irk{bSLyU06t5Qan>1iCGg;J!x7Cx%gA}Zn@JpkZ`#}qkz$Ugn)_GIEJ@WAtSGGvj!o#(QPPF(7cFFuyg67bR16uY`f;g^D8E8;kE?IfR{}}(ATVq($fUvQFhgS29h{{c zLg`K870R1|yI1qH*IV2aTV2i?D8wdUkhKKnO!8*MzCLDiSZou%1Wy}r-|hegCg_fG z@$I`|VHU2y9BCPSu4+lB(QEQ#TUDi`Xz1xKcg_mW0}%X&xsS#gN4#?ee0aPAkrJ~5 zqcCHFw@w@2tAu@}eS;JLz{^*%7z^(&QyrZJHY%$YTc9HLQbAI5(yL}&z-6$ ztjru>)F@Net zY6ZPb<&&qqM?)Gg1$wj?)d0qRWBi3%5n8qJE|wxzXFrh6! 
znXnl0fY`51K7$Y3i=Ys$wzhQ;r01^&pK8n4Y~GZk|-Q;6;z&Vz8&UQqNR^D>B&5=i;-4;vgiS6?}+&xEQ@9qE0 zUmK9O;EY6t{#RHI@NOmOb4kl*%8qJtweR2}0kF|UIo-r-OCbL}bseq7D5%o6I$R;c zB9q!Yfkm-JAtYcrx5smVj=*`5JH}Jtoy0tkOgon8OymrmPzqE#``OwDD(lrTrK+ns zMeexH3%iaMPw`SGjIVEcXiNrhhAsK!+9!7@ws%w zhEpgAA7E&K(V#mV-*#_Qb8Xndh|`rj`z-p%o)7(BY4*wu=!i%7UZCW5eb7DBu1m-T zyP-pG^n>>Lo1clHr13#W|ButAb_K8tx!p;}l6tWm8)ZlC%(0&Pyl(k8#xvc?2%i6>f|(Tu6oo z-sF-d@Ya&YcyJ`+N|C@SApOThbOs5ay3rg$@nyKJl;GP@*bY)R@>#JB-f0jB&OJVj z6HXFhw@TUpBdtnCx!>=OIX8j-;J{a9Zcy!>h_7$HOl%MEvad^o?chP)&^G_RPs}v$ z!X7XNyJNo@5hl_^^9yE?Ko-~uxQXIB#8UJ4%31(R4wXdyF>(yH96daGTL5;!(FYj+ zAzq}Z$`6C3YJ;&d*Y@Xr4PvBSspDYX4>5R7JGB$MwyW$Yk~xb6E5}1fw&O-3rrEmU zLlF9aHZt$%47e_|sXM<2W8f=D@u);MWrRPlO}eY>(Z1QJ1~dAzn9hQP;nQ75_xJey zja*0T1CL{rKldUAY_eSQ2zSM*GbqYxGN8txwuJI1o5MZ)p=^q|6xZBDYiLWGuZ0QN zHiu`>d513r=Jls;lX1Q;O?VK$58{9F=aB&RUkD(k=$FOdfb%iOi zG#!OsEE20ro>V<)2glm9IQT}!yVbmN8_&nURNabD(6MZojgZD7n%C||E`%U;E z7C*M@XVz+x+%tjE=-?h}!LYYw4=oHMYP1fOqbvVEONrg&R5hl%VlD&&!ZACOp2z6y z2Z#r4ko6+yJ9*D_%_?mo(2ew@A<7$J%_zp6n{;oi#Fc zMu(hj>}y@+hMcQbOuN*heFS*f%RR+MFW+p3gx&)$hd-VoA$I7+`>#iXzrjlX zg%DUn5kNwL7|w3@b6=xvN!3b+>#b4@k7d)Xd6tV`4kewFvyB$Rn~N*GilD`eU}N}Q374Qt*tTQHDK?3|t&d2Vk=S$LPfrC(A*tJ#3PKg=wGaBdFo`W&|HO;B5` zPtyAvdpM#xs`N~_a88^PH~s;C&P7WV(mvKZNmxpm(rL|EU~(TMZTbK^3p6!YBjJ}F z5f4BanChFjnYuRPatoY;cnORbk-v@08+P@AbgIEik&$j*^ZElb%k|Y5XTK`bFNHZ5 z&_Nah71$H)koe(gLwIj_RUGq`e|QN*{ehF`w{hb};*`67w_Ml5#sja!bHEq%VjPM^ z#}`WJvyH&`l<_8(9qQ94xI53VsF{=wMN;GWN5_r$Wvhaif1rlH5)`@11sOys02)=_kCXM*NUgar5m@f<4#Z) zxXE!0$h%T*6q(cxd0DMDiBSopUK-YQ6%f5XOR-O-;!<9mGkb$0GYeI0c7IrAc>5L%Xcb|Lkbaif7-}8zOj`@i#?8WjvNg|7GaHw!HCQWuk$1O(>LA-+B(+drI zJ8l!1xe@h0@?fmJBegOT6CTz(Ti6whGT4NJR;vU!3-25?K%OGGKWtm zl8V9AvcOZ`CZ6N5*7;5k(tiHGfDw47t7l)zGrCkC&Mp@QCMbUPOBavsdlwa`XnLKj z5zQ?)Tb{kkUL}e@xRZ$OUoXN7#Bt&YOqr7%LxR=5qiscbl6|Jcj#|*~My)%j;3ghW zZPT3GCR{;WkA#_Tm0o&n7LpL-Dro4Jy|#7|)FU*J$h#^04GvV(dnliuNmPrKoYnOy-Y>fj;{e;Q7CsQGZTuxFj(!VY0G~^^spkoodH~JA8Iok_Gy?4xRG)HoK=E%HauWHU!=uR(! 
zq!YB+yn0O`PC_210&`84}7H5V;3SFK1%iDP{ z#j#7a5O@<8vSlellzo0MtAAtXpc$Kh*iEU zSfIUfKorBwi{(Ll7V46!0d+qjzrIp;!4-kl8eb(*-he_&_ru8Z_p1K~g{Hx`f!ca$ zW25sWGToc(TjxBFnTfGjPJ^jEo2}0yZSxFvq!)iB-e%0B?WoKl5QbGaCj?W|_6&mA z=m>p7!Q!)mOpTTlw1MIf4CaQ!Cjn#%3t<}F-y!UiCUR-QBRa`@B0g7CGf+U?5%Yc2 zCp_6U?4qp%!vtIZV#u`>h2AdaHlXKa+}u~`Hylof=~c}m3CEC0>Zva`FEiu<)r)~i z^)KyF!5>tHG5FW(UXK#M#GOp@_*pe$>Bf|xEBlDiNYBzS60+oEo~cu>^1b8E3wbd? zATs^QAODPeo=I9w5x;rnk2GrOzWktpg8aheAm-!-F<$jtkd&8JE4n6?uGzw?Z|qPSAMY zG*IXNJ=5P^+Ve47_u7RCOO>x#3;lhTJA(1|z`j$*b)oDt(zp9G$jus%g|_g)&0UdM zkDBcVN>;Q%d`P?9vsbunWwLf)x;Dg?;#;Bzlvlr2w5*f*hQ9wrA$a)9cJ}^>+D_;d zSrB0x|CcL{3H+si4;Xpn$SVgTvdAvdBDoq*&U8S(VkY$#)p#dG;K}U~P+kLxm5ZJr zmnvyCr(=wHjbYhxjC@wwT8EGdHzO~x+ftk`>sg21U1s%z=xNU~7Mp9W(1HtIEsJ!U zd;Agsw z%72_$w6`xl_XXS1@|P-x;D!opdU;P`*L^AC$@Iy(H=%we&ak9|oAJAX0WIlrF_uVS zH;YS=WC~lSEGFC_abWxg+V&(V(gONg9h-IoXAFnGHsrQhOP$4FhW~-m#>T8ISQNpl zD&w)YMmQ|R;@9LY%W#-UiHuOP6Q=k0k5gIKZ5r-u3 zyLe1(JALgaR}A;lcDyz-`Z-`gETqW)_|o*E#5zTC%j!O?F+$fivl|lj!#Kj<51bo~x>Mgj`J<9tvWbc$_yaH%(f`n5w`OHU&Ij{+C znKE-DT9O38CHaK+TsqdNKS@G?gX!TL`{h(|$^N|m@h^IM?6v?&i)oPqSSo*t_g=18 z)qeJIv)F;HzY2bNP>|d{dQfD-v(S8?`&U8=rEdbQppH(Se_OzF@M=G1q-IC7{+ir z=1=n3ylrE$j!GXNb<;Y%ZJBjCIu|P&$MS#cRG4(4CxE6J4bPw=)-fCeG=?(XM@=WU z1kroIgLL(CEMLcowtdr+fr=NC0M^H6dPfYJWpLHEM7kbUSE=X|~agH^00_ zo0p14B)?50J7MCo8hpzJ*13%h;qlq=3nY%!%k}$wz%@$Q@cg4|*yH=c{xr(_wuylt z8XtEr!`nQR7nbh5+ERVf$Vv5iIHZ~+^d)HwLWBx>{({G3 z3263uU82K*I$BBq z#0oh$wag&|@f<(Byk&|Be%-IbE**HHT%K->xr93MA1_v6Ss3*epHSN;5hZBAu6r1V zLIlD4bO_aWZym8xOnK0^JuSJ&EA9Et4x{vs)&BYT0CB>o)usaRq^dTn2%sv$m?1R! 
zrxT+1r~AWNaE=P07^Of{+(vY#pRbPQYISm+m~nm{0uYwRIC32s&GCI1wS8KPNp7zK z2cQ4Kz6=)Kh;TfGr`Q3o`_TVU?}FdkAyP)*b=2ysQI(Fw0HwZ9X4F`QLR>k-$i_~5 z!LfTOkR5@Nw+VHdg0AC3XTx9aldK^zQyXcll(ke+R2IT~=Y)a`ETvxj=H-c0kJL{S zup7J6=9~KMrtOIsKh1zX$+jACd=Rl4+uIl~NS$XSLumrIULIywd){aWCa7JnFG_8F zc~3cdqWM>+Hr4-2R24SbpNuwE=yr?kgNEmixr;&QM;K6-Y2c^zPu_rNf|jA^2cNN* zz%pcY%IxJyuBK_^=Mdutyr9?C*a(IcVaP{PI83JW$y{$NzCo!yn~R=Sxk_Yy9eXcu z&C#Bn>ZhMFN+_f!NhSFXpe&E$=ucg*UdlRn78gcVP{CS7acLnAv zTK!~!3@=g&?n2xn1e$k^%)2NyyCW%MY+C#a&p1*XFc@gc86_bJ#7`sh{A zePPgqIACmu<-6nc>7U=513#L~jC;sXm>^Dja@j=v-<|Rt`cyp7mhuf}h{{%GbV&x; zL$wzyL|HhziD}#g2hHRmoW%k$?V@Y^ zf%ial0#UGeeg_oyru|~m$y^rKmD%z;?@Uz1zmxbY=jzLDSsfVMNL1`zf70Jw8F5t6 ztE5(e$GD*-3kq1Z3wk5@^F^SlEviEFx^+M1>En1jEz%HkV@v2T@q+59q8B(KqZHW@ zv!G&SkYieZG#5(?qv)JFsJZJZc@zpjRv*Eleu+o6dT)>kzQi%yR+?AaTC3W4LS#fI zv9=uE!Fqi`Hw{dYq-2z|lDwlAzyJi3P`lMZ6*R*CyAU8Po2t7PqISg(j)TIfN^Fj0 zaiXPD3$ijgMyznu2)>K!9z)SjBvq4$)i!4;RduHCrC(&f|Jq!D7k$y3a!?@3*-O9; z)$TYG5F5ML9STN1#OD8)3H*Rd?u^yPGD)%#lSj@H+{`aYwCTSl^rCj7G2p8|qQcz^ z91g%sRa@v4=a24k>$pzQ+f2D=w$D6(Q9_aqST_bYHq>Vb}Q6mXi7VgydaXM{iX{#5}&E= zgcCjMGOKM*4OguqbMNTqpsq?eFvB6%)E7xt+Y~;)Wip`nSDNVox{Vn>I;!^VM~?W( z@h4rufga#uwkdFZ;Xmm`xJXcAXpPFz!B`TRP)UffFpV&Wdq);K%fq`zI^u|zlpZh~ zF{qkV?6@0LO_#l7EtEb)R8WjaPhot0V zky%=Q2M|i@$2QL&2L=1~;3zD2faLnRgLFEv0BnyH$^=NxXfN!CEFLJ~#9i)n?Vs;| zt%n_-1BUrW+uhiFp%gRQuP}^e!MI*kocjC#1PqI#+{ocqy=%og5j2jmhfJ?}RrxE^ zCt+_hUQp>~ZSS6>flBmQ(Om}#;;dHJi>R1pPg^e#u4pQ#S z{_}(`DgHB^a_D7!NA7VIOr(je`=5GdxA)H;oWvT5sd!+_fvP@48S0msp^IR~u>pqt zL#XD8n^-e>;>MmpQSC{bG(}!1ZMiRaj>P>?DcZcg@rmi}wrpN*nDSe0;rrJgD**z? 
zP%ZD(^BMoNy zW*jaF=vwpkFHa$diiB$BH>9R_aNMJu|wEYWvNPi#0}<6l=>VY#;avXP*wX2b0V7wX3#?&>k2ahFXm4+nfDYa6JufR+dPVj;#ZTk~15 zYYoci^pc4Q=Vb$0`u6*|CS6lZ{&}q)vjB}zsUKJY^*a9%UQ=gZz{FfmxZ** z2P(=|2j+yiJP6omkVt*~6D!1JJgv&G&vKk3yXfHdtGsBHJ$;6+5JRnXSeg*bu34=h z^{QT*^VP@Ly_a|q$SdEX6xzN^;PZ}xbM+QPB>+5jrBKPnUcG89#7^W|h1HZ~gpp4; zNv^+XP#L?eSf=TL;IEf|gw3Nvw1htWsEdhOwUeO!=x#b6hVMxdB?tp2-$BMSSs`Ab zOG{6-Q&6l0zx_027q(4>g}gIDxNOe$@PjUQx4@|MrYx9t^)QxAGrXq&gxVwqH2sQQ z15;}{)U2KzY*v27iS?2BQE|1t6rx2Hx=UD%HKBBsp#=!Q z23V3R(Od}tuwfB#uhF&O$AjEWM}X0c+r*VP#8lWOcpc5uP=_&v6(^}@Z=9WfSIi%JWQy_?j(#L9(3p>okEG2WQGYTuD2`=Wf@GXl8v-Qr>FD(s$ zHu`6l7P8?WQV)FDzo?)M#RkkTd6)8*OTl1lOgN%Qw6mZGoPJzRbyhAF59c}z;O z1nIj5b_L?|^1SqOA{)}4rxV7p&2AzGS7Q%`Z9XuEC|lr>ODzOo6!r4Df4M5?zOIIo z*P)=xd&-{-0tuFg;mmdLl{g?qv$5J(&b+Z`tK=1lq>2~r^I7VuhpuNyAr>M4B-V-7 zwEE{Fvy;PHlwt0zBB~Lq;NlnJ7fI#n7v*9%pZdqBl~Tjn%Qo~UxqB;Hj0A4Ycwt(> zV7#F+9tmuRgS5!?UB4ag$IOgrRiXIFp=fV0%~Z>V8*%~oUW9>yKTdU>?m5)r5o}RP zGRdZCJiV|5@bl4OpqxKrx&2JJb=7)1U`5xHs9ldWiAnDE>~+oG-WtR!@W7-8u#G%N zAeeAJ4C;5}!B;<&uTT{depB?V-07#{R8An5dIq-5LEr8o_!k$-#p^iVK}$DRJGoT~ zi5qrua$-OKTUp`~voSK!sU*QnljBYwf;X_*N@-v3ltLTRaJehi&HjV*rH8zTP(G0p z4+`kaT9yJ6i?S#a-4nvxPuDeXu*eS(lWMtDoVi1Op8wI0-rxA^?B}$-r&}hFX8)Pr zdLq_<4kTuvD&N5{E)N3z6(Zbp2Z$?~8e3^%FXA}alp@-5XeCz*yN>#B!JqXiItH}b zTv1O8*>W*4RWRiTbGH4Sz9zJB{t+JNo1vFUNgDYYFb?Xgzthl0FMqt3<+bRe?8AIK zj0&#XqHNfnJUrmw)eJ%jGd4llpD;_tmRG!shSsCbD;Vo2=_ro;?E#%o_y|h1#-Z5_ zV2kgon13FveodRsgd?p^a>UN$7^BGn?+!*QIcWpGCDK2y7_^5%k$QEA1jZoaV&GNEB}I-eA@G7~Jf~=#DqlVR#GFda-p{89`y|7E=dzFX zbUGbPKTtmHwRRm?WGt~de~v2#d7efP3Ew*aV&gHg7e0gsAFB@I;WN2fk@XS&!gNbh zj!9&TEs6_|?kO|3akX(DjVDKDpyfnBz={AwKFMy9dU7Q? 
zv6x#Y(NZrmC0u3`*5#tPaI{AgL;*+w5pIw}N|j{p(Ny@^aEwfkc2)g6hOcjESydhY zz7&r%PON>^ooBnVMJcH>*p_&BGl0--WH?0|T@tqNsr8p62F5n@af0+VVAfP-X@_4I z#fA&U?9qJ$ITt#$^GxhNAn}NxRe0F7e{@(aJ*Pu}xLzoI{4&iSJakZk@c-;QR(KS) z1)M{>=+VPUL#xmqNh>R#Ljt%=RJYc2&zjwW^DtdU9k2V+AEW5AS;Ty%aEqJf1S=S7 zJIKt!e2FnC&P-oYtZSDAl;x9~8)M$l*+8Wh??DCoo70Do!aP8fa-n) zfGW>Gn2U0=sIRpwTZlGqM5>`OD$(==9pTBxA&0F+O%ZoD68lU7TiIsaf@8*4gvwph zj)de_Vt!}uJ#WRQi&e1J%xgY`6?0fo4=3*@2cj}ch>nWDv&4q^G`1yl5{@r)^U5$o zn1|JV#e?-wKDuuxM$OuOji~^m!q|uvI(AW(cuP?g11Li5s{s(1a>`=zHHgKUdOz_a zm}?ZtTmV%-s=sOylCvTcsqUuQ!tAsGLieetI+k3kb}|$h(w7s3abYtE9!fs>4C&@-DYe>!fzW5egZ=` znu6g`0#^F8?TF5YUsSKp6mw*kStJ8B4Jj(kBVyHm}KOffluYi=apx_BezPy#`(-w!lR?R+x*l&(#qobQK3 z95qMFz=k-fB##@t!;%1HeRY!?-o*cE!CbG_04!u~e+6dZ)4rkhc!VCj*f-;+ z+2(DOTnE3g{kI6a-8YNy-VN1Ly;idIMhGo9tGI@ksp%52N41In*HYMh)ez>D&(I%I z8p84}iw3#Of_-LN^Wc1}_nF5^+1_|Zzkt4pR z8^Qa25LK~h?(1{J$9sl?po2_3GH~L$t-JVbwaq4fMds+fu|_s6|uhj!vvUn>uB5)?@syKhC-Tm32b5+N~sI9BG zU}=vhgngE($s|Z;K;m!`q4cJmPFhvLVJyO9W3}cX3p2C#p#5GHV`W<6iD?_IGDo?T z;`HBr<(qk~ro1!7+l>F0Lz8ee)|rSp(AUs_QjeqVna;nLp(TYV#XE7bKeFYynyu<+ zNM}db1r~{fhRvocLwy5=`rek9H&<0%ZNpOJ^Y?($5%_34!$UdU$|*0hV%WT3@y+kJ ze@>K5Co9+$xO>qHcHMg>4A3_j9v-lfmB4|Tw1dE!x688U8L^Woy%mmR*)zOT1X>pr z0mxG~{#j}FB18t@fmlS+S6VFfJa1*3(E>d%tzvqwiS5l~Bo6SNuq}YPTt(*k>%563 z_#RBF(=tsqLY0!uXNcoo*_%z~g0Y1knk*==f^%1zb_n%)DT!d<(gHRWA<=rZjIRM z<~?{ATjZtHZ35J5)jbvR3k8=!R(@LJ*v%Mdb19^g{UA$NC1x3|rq$8$0j0O^VgtfZ zseKjy%SsW@>WJ?;9)njV>a(%-YA&7~DhG5Zv}3Y^31i1EYJ@ZT`;!F97f=e}0> zF=RF(rG?IYqiW_K)?C?aHWK3HT_hMUp;jfv7qEvAeCUze43n^>+QXl%YW{vqP6<9? z3(I6m8fJ7er!FOv8Vefw$NlmTRVA1{!;IMWZ5pCNHk?Z+lw6N2LC#ido6VwX4c`6G zvff!yax0(Hy<#y=F&7VADf92S(;>kcq_+Bn;eOXtl=9>Lma|cvj{C4k1ZB7j3S2Sx zU#Zn6rA)t@I8es4MDEizBX);hR**IW6`T6T0lbypdjME^?G8XnNkzbu(=@Sxg?qX! 
z9+x}=0kdI^|5!|VMJ6}unAwZ3(D|O!EiAxWFNGg$g$3>s)E_40m-FH>Zev2)M$&jJ zY+o&k1+n^KVqgz_lM_n~MKl+e-0vOaa5{n6-$7z!oK$?Zpg3N6p}!vx<~?8ACX}v< zwc=7>D*yzcrdD}|!0I=gFejQwMU=0R2U_gtZh`O3yZ8IrIDrgGi~ zGDE#SG9^Wxy6wSD-s8`bnW~ZcxE>A~4_^OkOqE}JMrO)P64m~PjrkCGr0&~$)Z0Do zf6dZltHle9Bw8uj0%UcK$$e;?MHeK5$3SMOy&_g)2e#+qQ59DCTgZ%&u?oSm9ftA8 zV4>yc>2X&aZ7Wezc=T_>Do*wm;+Vem<2w#zc(yefIDV!AYTpXqFR4o1#MAE&!M}nX#Z_l|C(hVFg*+WSi+JKJ7q?bU^-;^FWvSqoHSbB}p!*bGk zz`+}v9~gTfbv-DC4N}t?Pgfz$_Kmc#LNdLQgjN9~xievHVAQ7J9H3^_P1b&pZLnP9 z#H|@s>0EB(43(mHexTrzSTlCT12m+!q5rR!VBdcTPO4Y{rz|3>9**=Z=0E0A-3VqG zBjBjqPm*9EeRt-k`rg4OYO&3S;diht*h?Zk4jLd=rBiM)-rY~~O|6)YvsT0az^9(BkO9 z$aRlo$mp!Pz9N(-@bg%0ncaVFid$--9m78`xduobft$w5YcpVOkn{Un7e|I+D){vH zjfz1(QG({L5x(2n@v-eqIHmat((FcUrM9XbFlk@?lod9oB1fmj za~Ww03SK6bfg7w1R7Sc=7-vnECTYMF&(*gxpq-tOU~8jf5<(;+1W1bc$8iqNRXzr| z#kRLy>Qla*m4@m~oR93beyaxaF!V|^L3_|0BRbu}GWP1B;iow=u>exeh&V}rsK8MQ z0y)i&gCC6A<~~thIZ}asTN(V^Ry2NY9B;Xjg|H!)k2wP>sHjz%%y4#O%66c={u0MR zfxqp}^sf<&&SPpRj({?MXPB(h^O!80mHo`q9e9m21>E6T1K$FR+g1#rqPCOE2*cg1 z^4p#6vo`j~^TFnZlgY-RS*k}$X9v2AB79dwUgS~I3+wjHq0yLd2*2@@yjEVze^mLT zIr%ZvO&5#rd4Q7I>4YpqR_H;$SsB9G(UN>zeP>yxF*my%5z5XKNevHaZRppQ17IDd z=^MA3ZI&mJE_17;6J5D|$RH{eAqd;aE0x5FpwY;9Pc!Cny;yI8SecrW7{-q-(f*+N z>g*Yu8DMc{mZuF>$OC92QG2qwK9fW2?T9ZHpdujtYjtJ zp3%J=h>p`!s_qZQh#GR6VD@X8L2D~^B-TqVG3Nt-T)^gpzWk9va-`lF^<{uV@B4j@=Ib7B@L9Aw-RO@a?AF|AL0;W{a%6N-Yo7l#^ zTMe2BN_Fnm!o2MWo9al_pQoo&i(YWuy}@;;u?7HJofowC>0dDq@FybX8xH>4EC)AG z4l8>5B8h=Tqf5KTW4?}E()Dty3zRi|dKH9Ru91cY;7iCEpU^Hd*8q2%S(n|x^Rt&E ziaRy12HCipzmB1B#j>-H%1l@f`Yi5!sV!!_dc+~6fpXnLL;WqDf1Y-qKE9yGf-Yke z$IgixxzAh)?%hKEmLa7^LpLt#9!flIeMd4cx$%WhJ9rd-4H$gLv@*#5>{jd&)k=nD zE+L=<7AK9AFoh!h!x3Q10l0aHE%h{{fqa(u*dk1_9K8UwLjL(5+6alWuQC%dWIzd7 zLbDRQav$b`C8jKO%NvBgn{`RfavDnqcIx^N0Pexj^bNJ1tf0`lbqA;V@wz7SCfR45 zql-|1TIh3@!RVDW@_fakfHTByoR56q!i9>9N47OpTfU)vfItjsIH-H#F@ibAjePdW z1I~@}>^mo=#M;~xG%GUFvscXVT8EvY)IZZ7s3nf87vjF2({ED!YLsx#uo5K3!X)Ip 
z#re>;6FuMOS+-KgmPi&8OtH|=zb3pzo^!Zwk56qaQ%=30Me*o3=0VHjP`hP$f34a?j?i3R1QwXH7&yJPcPs(4m{qYGvfe$ShRx?WlYS6L(PYp?Kc0G-` zq69YNYZK^d3(QyO=UjplHIsowm1mM|0pAY)zot(8hVnNnt@LV#;AU}f&*n!zHV6@aYD&u5oL%H6Iwp#aw~| zPQQPS$u!9!acL%J!PwydStrS7BDOW9NiyWIjVWA3jM<;}QdO2uuH5}OT5Bkrmq3pP zKl=&UwAl|Fn(hW+bk&;>`7W*t&Mi?OHCrH9@zr zUBn4Hc7tsL`1&E72ohN?KVkMZt{tdTluTLqIwa*wqw3sfxF7M=sH#3?bJlu(9!vU( zEQUD{v*U)bn#hSj;WD4Phw?Ofe!`frOj5{qB~-dbeecoQD5K0qml`5MM(peG_U-Yc zK15(h6GqhvF^P7r=7vD==*w!jbZl;w#dVS^74|!i4h*n3t0ee!`|9}LPy~1Lqdk*) z$uu_8RNTaSS0g~W8LMO&mS}~#BcSmogzv%aVNk_Ny$x@wc7w>kY|#&kYtV>Rcfs|n z)JFjN7~+614U*tU(E2p*jim;et|ZkMsN1Cvx!xAiJeOI(td;-PbN>PPr0bCWEB*`B zAa2hLoL+&xQ5A`4gR?}PB<$t*ZR!Se##8kkUxe~X;+Ya(`9!LHHcwL{^V>NJpi%qg z?G&wBi5|UaY{s{YTH+fWO*MmE*k0Cj%-!Vas7c5#{fzl4#Go7gpgsg-0EcBv0l@|E zC<*FrNtESoKcDy$U4o} zfej}HD}D7B@boLPTNGi}j*qIkP6mwH{5hMjpm7a2%_mZ4AZZa8Cq|=F2FN@bg31s%85ZnMC?pN9l9|W3Hd;B#j|MFDe z96y*?w`4-ZAzEt*rB0Ym2BPWn{G@t(#_s4E6k%OHih^M%?=}-<$Pm-U#sl?PD)6rK zSVg07Ui9C4`H~lI(q%FoInOk1`gGZy@(=bMS|2xk5<^Lft}FU|Jgve5jA!@;=WlM= z#|+w>Lt5E*$C7$*ah9*ulzxo|63eimqk(-nRmbrP)k^zQ_gSKAJ5(t`O(3;V zXgJi^fn@gz9mIcd)$v_V|68^(0)Rys1zW?%a7x0M*$XZc{#YWJcP6U<*`JrZz%neT zw{mhmHM=dUjV>RK%uaj(cDf8Usz32&CUe+$X>XflOTI6&8u2pDz@<&IdgoN2@nWhB z4)369tZQTj1S!cL1^7Gsq)xssJ*AY>T<=7Rya)sH8xGM;VryRywBnj-GzfDNZ^jN7 zN?ARlL(4)WF+KX=OqEGTk2Q@W-RNvzB5(ZVx{{s79K~ErLEFMn(g;Pyye7bBDg}4} znbYA!XHkX;Alc-S7lNw{sB3ZS)3J-{oq<}~s&&tZkb``}LQbl%sXc6BVQN-yRHyd} zI+z!{))&{l-?s^Bxoosb+sYE<8`QyC{NUKwKsq!1WgSiXJA;rd5SUXSwVp@}{cIo} zULR=jgd;`fNh1V)^L6)g(I6np59G)S-zdXHI3Hnib78##5q#Dg^p2nF*Bwn2gj3xO z1kT>5pW@vJ*gnqPX&0{_y5*cPL$mGpEY)q!`Zu)47^oI3NpRESG+Ltnc%9}M-P6{m zd;`s(cSqLJLyj=&!$HkX6IoJOD6J0=Wk(8JcGd=IR`;eZxMzB%`P;`)?~4J_(I*cxyY$K^PZ3d5D% zRRM&H%XC>qF}h*=H(9;1y@m0@dS}v6`r+c{15HT2AaLay(Ofc?Y`ursX3zF8l#@R~}Qp_wZj8Br2OvDLqEjcLIM4Kh#BEDCiVB+n(tC`njMo=#NN_GM}JbAL**0 z5Nyehz>FL%tO~L@a;Z?okK6|Yh)Cr+csAg#)p|}W3T=yv{%TbX$X_bvYR5CSn!XUo zL`b0N>hKi?)RYg0|K%K@EA%&*{N z?sVKqGWNCTqX341(L(1)PiwR86ZOev?rXaN 
zJ!4664(Upkt2NFYPPTyO59UZ~Fr_f)T>NEW<^r_T^c$^TL%vl%2BMWzlbo8~6vi`-{vE!THu{5)R#Fc$l(*TH)^v92z{cdvr zM#@}vLVG|EG}Dnd)lkQhGqQ~NQLXc@zq?auyrZcx5Re@NEkpWv?CG>kn@~*8W#XMf z)5?I+NAD$R(@p1DsktTJEm=w7J=|mHXI!P#SnK3Um1^xN9gdxhO*qK5nBxXve(M@A z1J(A3TzV+!H<(_A9o#c(kBFgQjG0nnQ8(Ksr$SZA@hwQg!)Vd9AzNUw7u+YKLO}Q- zss*PHXmWB|3u9OQcjkp_v7+_`qqwJo<7Fj>dlp$?$t7Rw=Fo9OVp2tkZY?tCVU@VF zvgfy$?5QdCSa-nUoa~r?_+vfR7Gv)Z7urTCR8aHSLYzkMKj6N^%QEI^jT-k9#4!eu zFU(x(ofk`aFp+;o(F_ItkM=6ycHX9-vV(`skzpMj;)dK?X>=iPx3cfl4HrgcR&jL*0*8=uur| zl?4DB0=J4!6Bg4Moq{atoSUi0V%oX{u?T*D_E7Qq6Nk;D!L%^704|{3aD=tPy+A1i zznRSf)_wE>cr-HY?}l1dF8EUw9?w%q_Qlto^LxIyhzG z!GYGlV8E$ySxG$93HXd=fY~`?B!2W>>8eUxsdB>WbxlC2bk|EjT!bF@IJnsFwj-(x zeYv)2HYmASG0q?UxGl?FTxshC$qQ_CO3U4=Ecy~lsnKGqHLEZEU>iQkQ8ifi7JOfs zhxJ$!3Pr%kxqv^$7`0B2+n-4y9Z0v>Kri=va$%nmEwjQ;{}OYPR~2%QgI7ByyUP^w zO_z-}1a`(4TpkO$*+&WDI4Sy4&d~z$2pAr&g)wF*DmOcOSHu%HdEx#`lta!DK))=u zM49H1scLU4PO06gE!&UotcE8rLa~ticpa^oH|+KAmvjDA8DZaVM3fa0yvS2HA^4-h zYh^WAyiU>iL2{$5YI%o3mYqS#S4&%hMa)UZZS)8NaLs1;oKDOH*J;Zvpp@r%2GRUc zU^qWjPM(aM0|7KBMRQ0*w{9LP>A?ZOho}z1+ZFo)(EK$VH8G6Mf)Sho=$JfeF!d9klQvn>Jiw- z($li%L6VK;_BIh!&|)5tb`1eN75sEamdB^ctxX)_eEZ-Z$Q5}MPS(hl#^S1b`;Q%V27-UbOfr@jYwt ziMwSo>v6@*v)p33fK~hc0sV8ZWv55{5(K_^U&8S?m!~tS;R``r9?*(>!$~5pFu1xy zRntXotO2os`j(=j*BS$8pa%FJx|Zi0Xf|kWI*SEAS9T*cf@&2$sll+i-TASFh>Vhq z9*E^wBr*ru9mRbA7vr}91HM#rwBnGX;IV2HM(ORMLPwWc=w41BY95c4h>30Z0a=)( z`i{^dcZi4BA7-!j29aVR>Sb}Q$?KsR;*sdwGlQDRN{86QZ)fWlkP%EH%23$NS`-eo z;rr=JmS-eKF%V9QcPfTbnq)Buk=m#4SiBK$6w4v^48KlObl;b~m`Q(1PB zP#5qMrC&_%Q5~QR%6V})j2B1s0&8;Zzz|3TD2O86N-6U`xt~D3n8s%fR|tFG>d2`p zCbC3Nais8}9!j=2h(oi_+YGVj%+-Az_%>~5{FSq-L)Q~L2H+vlbG*GoVqsw0v+ZlN zq4lNA%~j33{}J`5rIzUXlmyn?z=CS97t`gI+%Lb%2y>*Dd1r%AM`mx>_NKQj@(u}Nh227Td${+)p2#W@@)e*f%#|h3(CqON8 zjl=u6X=Q3;-j~ z+&xt2Ug>*ml~52C)~w$gz(br~?AUcn z(HP`yv%x9?F41yz90?-HRT9b>lEJ=XLN@i%(9gK>1{xez1RlV9R7Zm3=dcpf3ZsUA zDkt^ED}xf^)E8fEO8YI>=)jL;C@XA1Nte%R^;TdL2<>P7f1~>6JMy(gJ0~@HzX0_5 z@0+5E>x&^eq<8;YlM`10U&{!%ik1}+1vf2M1G|aGJV@Y6KCPaK&%N|JfYQpr=np4# 
zd|du@0O$%0Bcf;kc~MJcr6_k-YhAI4)){C1@@K)DLUg+R4B9gI|Y=( zSK=(@vi(t4rF$o~*+KXhqV9kiEsIK;yPr^GFbw0|ho3Ydd(&^%KS+xdcpcgiH{ z|EHV4rrns{(KDRV_RM0$IXz|@pY^u2h%wMWKXVUI;~8mcN^LeZ7c z1@)-$GgXmSF!w5A&j~gwUz0qzCJs(J$pBy%{+p*TYEt1&HnZq}x;~>tI+$=|N1P*A zqUOqOk5*R5$_@{M{}@wH9xtg`2R%sVbm{{%3A2PIlb-O=Bw0h5hxgrAZJ} z=FMxqQp1gHOM^gscJEyfWk)3b{qD)~(ygHL*Dq7hmD5`+qboa+n}-r!pN|Yd2Heem z;oiWdxxV6i24wr;NqkKPbV0m&%hXjv*baA;bnqVe8)BGvIkl%7Te$x}`iY&Wq2&^4 zcTp<)c*vCG}UPX(Uas|wSEiApt))3RyhJ-n(7{q4l7riY$@BDJwPh6p%&^D0;` zwTasWAR5bc1fM;F)*Io-fH3J!-}uwMvW-G2^mFcYI+md-ySb5}&OJ$tkv z9`B`IeUB(^^GuYU8NX%Sq*ZAb8uFeThs>M8H#K@UO+uoq~iy zU=B!>Jtm(hEnlp5OAlljlbmT-h)n8#N}%78CJTU&AmhFmhaJL5oKa8Y@#s_?jKTBQ za07{G>~9T`$ofQzz6=m6aNWyv7!c+;z%9xZK`yio`4NaRv3TMx5gaM8j>l^8uZJPq zI0R0UyrrE9;rGV(K!qi@X2aq(Tea{X@3e&LN7QwHWcLXL9Az+`=%S(kX3x@2;Xb^X zd6l2TjG88HAbaQ0C21SMX>v<~Z%B&_Oztj7$z<_jeW1z2XO0)&#gBB!5)8(sh{wGB z=ljzf?W<8JIa`42DnHnTvx@i1VU&?2fE`}1?3sK{>MP5t;B4>2>fHtK^;ap+@*6pt zYRKC-bl2R_^KcXcT4MlL6Yt6Gm@*y=q;Tng9V_@oa++P3Jqv=npNZfz3JSh1yTvKM z00b>7;#xqNVPHgL4}pj{jp!|nh}&(e!O8+BqrHLbM*MhCnnyh#2~~oX)PQ`y5I4O% zoDwdK{EBp-dXMqhcJ`s$LfNh2H*vZ+$T^GVv0(TUCFqA7AQ65Z(csMkzlLApIJlfv zr!th{NsjwCz)=O`_8&X*DRmfMV`k0lp-pdq12G1dMg}W5OYI=sX~4l-M=i0_#caOx z^EeBbU`?PGg5AoXAFHM>{+D_Xd)m-f{Q5}5gUaG?-q9PyxN20#L_pMd|5A^*Qx-sY zF06>$QGX`xv`gpjYRu{O+_J50rS@MxKS75VW9c_tvzs0RdUF>c1x zFe{nq@MUDhUArn5q%($wzo?rl7`sHNp;jMl$e=m|%%3Si>0$W7)1N-GPXSlXk!;V0 z;GaiPQKClJUq~|M_!uCjVy3!ii9OkoIJ)rYI%3oWnyQnS(OcRRV(yR6#woGAnWQ@{ zZS!Mxj&P&WoK_o^zpjZ0fJZn9nE0Fzk8>~{8U8Q*QtRK(AYJ%Xii^=ho+xdt{&+Rkee*kxRiWRh{hnn_JSD5J8b!jXhqkWJfr6v z*`nx#SAEyp4IAGDrGo!xPmEtfOFRv8XW*fyRYMaau1=KE=jrq`MW}c_?FhzQ!B_fC zi$PD!WM3#2)CoC2BMoXb4i*y=F?V*Vh`U*2rq(ybFGy+J()f4S1w3&|l=ak4xzolV z>@}wjRqwN~gDWueU~n{3o3eWepXrO=f5x6AXyQ*P#jW=WAyDIM6JR^=OEne$F5Z4p z29(!C^F132c$=7`UCoKHD2UOrssGpuO}s=i`Ax{$lVTRF+driB0{eZa%`=7>EMBN>sdN z(EVR>ss%)w*&t2O;hwDm5dTw)x-#u66&;$kPvvd&%Kv7x9jaPhD{qe&B@zupX|SG; z4ANy7wL|s*M(JS+mUYH5)x0qus^4j`%Ym}2tcI3{c;k8C?0=VJhQJ{17~6|l$jrrR 
z{t?fq7^%_wSZYZRx6DDodE7=8#nv9l+_iY@VL9$--W&7_?oFoXH9mR5bJ3gHnx}6m z?3hvTbMCWx>82Ft`^xaD98`(rQU>o;qi1POx>i}6X$q3--I-i}6C1on`IhKrjG+5b zWGo*^zR5K#!&$CU(-|xXl)67O9QvOu!O8)Ah8#d=@rU9HNy7LH*T(KGZI@Xm%=9(O z5eFFbu=p4Q8r~QQ_H0uTJ$v&I6@vePxU^_Ib5}b&FKWki0YwPPJzXE>DzP)oU=KW#^_RRQtHZ)uuJE@tuNvFvx!kijE(FU`I4X_={s@f_{Q|ijKR2d== zGXa1%C}2)qiN^sSDMs&2U_|A#d_kXvi1k@~H6%aKiic0tNvClYyggfL z_K-=3qLuO6?I%(QLNRx3QKB5%LyhS%=uE#v(pgExc+wqOmL^+M5YqtB+!J5xfAg3T z;idfOjUPUMF=kR$RJEcqUy-0hFq6IBwu1mndje4Tpw5`)9bvKq=;Sv>F$i-CT|8+# z6(ZuB`7)AF_vWDENfGV8w;uj(0IDPp{cf3w;0tZuY-W%+qVgY0sgoq{Us_nfpus1z zOO#RE4-TUtb_$7H{dK$wQhC}H0C~A{#4AD3v6rILlhM`hZKJm;L-MOR1~u}Pd!2)& z=Jzw6_B>xRAc{r$BYnZ9UM4k0B4l(N2}5@&VQoKk^mco6*{JI)tI=u

z3xMjYAV&)!iS?BiH4)U*ugTu^AnHNxIxgDU#9p}i(~9Al=8EBFS>|YwEBSHT`weIS zVifXd!OhW;)Hf!o8&DuAPuXX7(VkMuj8rR z2?+4>7G1V>(;hL?0BnoKkie?&@5Iq3Zat(3FPfO#*E{Sd*ium4AGkdP8xdifK9IHv ze{cBN{G2Sdx84Qqv2m$YW-Y90kHb>Q0XAmAZ3YkqxzAxlYfN3=2J=$v)Pl?OG{ZzWwzs)rDVi8*h%Zc9 zs~No)A+`ZF!i7js zsXp6MHxs=SS1OuP#OjD$>Jsp#!ZzB^T4%s~sYzC-8jRTy^2^KQb%Qi~u2o=HehGj? zZqdJag`=y0>~z{0qdpJuIt%r^b2Abv{eh(XWKdbOyHaBLszKIR^7?%SW1w@V^j?&; zYR)@(a&cGMFZ0tL5bU_sW&(Ob#()Uj@RPpaq)3OuvI=VCMS1%kkBcyE&f(|f(2A0aD+EVl$Ek+>7#@6={buh*x`GqiGg}V5-Qofk(Hz>$c109SMW*Bbh(!DvoJr&q;u7vd;DzTJ9mXHCOWw zf=dHBs&UL9q^mrpyB@aS92PMMQbIZBF}DF3e^^9>b(txlAQ=drK`QP^;S!&jnipo? zz>W0iar^zXCW~vHiw8;jQ_*4$ZPl%i_rKwKbs|8Ga`IU=sM2qV)md6j+r4byp{n-7WUHKkQHYYoJ8sdP?msRR^;TV*RMMRS z=#_T$rf!*;jlNUqh4?n+m+;DLl}NYq=y7n9Qt5LmszoP!)8~==P+IU_s+LrQd?18QjYci3A$qk-#^q88oK#gDF zn~z%qq2kIUbQ}Qc8^fq_j;2kC)JI@^5ZUQ~ob@Qmak0&V8j{hgK+*o>V4rf{z&>ni zH8X$u&%x)3Z)mo&?!OrDU@rmtRBYScrvo2}*85HHgxmoVoD{nyWg@RiO*sYel;Zy6(o zq2xvQOHk9-Pv6290?qzoa5}(F#8bzB+vyxeOGRaiyL9y?`DDGUn%%qDY$^KzK879u zR?o!|b0LV(Agfyf+!Oz0bI$&haJBfKx;iqGue25^!(F zwv{T=`bF{qj{ylDZJWInx^VEbQo`@Du0KDpr9Z1gD zsjxZe*rR$RMXPp#y~XS6N5(Xn>#hD|6^Ye#HjAPrBalQ=++gi(LhyO9w||gK1}vZg z+hPjQsFqMbrR;_y0Atr8{T-ikGAB|8)C&hTMH}2Rhi(au&^#ABl#jJw9INAY8#bVR zf;-%!Cs%tD&0%C^UESmM)N{YbIqXOy=^V{G9zKQCv?iA{SW8Q(wSvJ7BsR^xmowSe z50HvcAe2R6_7ABd6c;k#(9Z8JPFc~;M!A`UGcC!^?^od`3o$QLVuu6v>7n;ZhzemR z!AF_>v%>xjamXrHMp@M7i?p+ndl5`^q7?oOJ_ENu#}VItz!wj4URk;kyq-)}4GrK_ zCU0f9`T!+43|#|@ZbU&|pF8q0T~L3`6d^Q5>>TPWInr|WjqZu^Q!zrnHF(S|dQhdy z2b0F0Uh^PXxuh>_r$1#C_`~RUEJbh`ohsMJt99{+In(p0L^?Bm_Xe|0Tkq#6!n}I?+QMl(7 z*)lm8yiNvW#*%APKJHRLQ-kYqwK>Fz+*X{>bZP@kHa`Wh_5Z`vJf+au)sN8oLIbx) zZ|e~9rE~jXt+uzBlmIN;4e4|?VcQMgzA{L#n5wTpkSlGgYBscov$b-R91x+ew|s** ze0~9pRa|vvZo<>>UKu>~^WCPnnLinK@FA)v2{%@o2fib%t|nLJ=r~HHiL(U=WOjBE(;i?FO6J zh}$}=_m^_~h}3Z3ZH!{-sf(=i1MGP!Y5^F7xw@n*N-E(VY?s6mdFozuZAG+R%O;Ly zjTYtVLRa8FSIgB1h{_A*yWu9pXFh?RrR7rY1Dy0*qZLlq>mQ?)fYhj6>sKhv zH=k*lg+vlXKphqeSH+d6c6!W3FH=;*N@Qtg{KE(7^w~O_P3bxPw2)>hlN=b{rt?lh 
zws5iR_aaVKH*yuO1py#|uKRZ;RlyfK-af#5T`w_xZ>dJ3+L)*Umm7;F9Ou%$7hGo> zr*&&DhIQ*eVaa3+(_>Pf;!e2STum=H_PurIf z8~FS24VydQRJusjEJQ_aWd#g9eT%U`)5c(+;VP`JhbGL?nO?yS8+J)J5ipeGWQv$x z3Q=_8DXFy%wL%WADDOjwl@7F=aL<}$IP$ozpU8eoZGSA#Q)h6zFqdX8vaAB9)!>gm zH&+m4$<<}Bq=U*KapDAM_T{WvX)8|Yf^e5zkeW@tIyX@fJ1ZKS8Ng5VlTZAUcRe8HvGomWphH(V)3m5uYD-KpHaD+TUk013_7tChr)#!{BOA zOI%d!*-O@~<{Ht9k=n%*9Va^cPW;=m#Gm2I`-F zu}O@L%#*ik7{e{2^UIVB|2Z*3wD&xCzKFR4lI3J7nX(1kwln(Hx4Kv?tljCrXOzNV zy>UsFAwb3ktlQt8oWbG;uyvh!FFpit78I4*lh_{}*}!DXF<_yLfjG)-uD`VKjxkG* z3YGw5=Um=VxrQrI5qxCpuJaBSzz*0JTJ5d)MhFe>lUsl)CRz35Q{0a!Nf%()R(Em{ zFT={JK$PBJ+4}7Xmgyi7;dp2x%Klz=Kz4`eB8f6|H}+2kg?0r^0!`SpBaOh4BZGsr z2Ju{^v=2R4eG|Z<6!aat9IdVDX`?&R3mSkoE|LS!P9^^M3BE1ZI{==G_2m-K(QQp| z=Jh=yidz?8cineJ3e6wal#5LLK5h=%0fEy4)Ek;EX!-(EE?;y=d8(oVz5SZt?mVd! z70dpbTV-{jye{}T^(5I@i^P{|D;#Y5sz&X$RrepDe@Hh{mBJ~e5BtINrsL|2VtegX zvm5iO*wAVG^%U+EtpXqisQ7Yv(Uo*nTL@J3yk&lbx!5?cqkRRtecj#+CiuALF0g<9zf(w-FT#mfji$XfDXyAPl- z4x&$(^Qwhfjo*6iP~Tn^e?u$(LwXJeA`T04 zVRC{~N5~cDJZ{#6O7g!ovK@EYkma&6he6|A9D)&GxKwW>6bwd#8f7IVFn4}08F)=u zIigSRvT$apW^Jph`L)yZ#%TP?x4K6wNkPLH)bZ?|?tAk-_*1Ym)W=T%w=v~UeG}i8 zMtFWji^ul0R7~X~3^JnZCL;5=*1i$OAA&)^-IT9tD>KoIZwq?a^pwOe1TB1R0!WpZ zPQnDuEgDP1yCU`Jz&B%c%p}=`T_u%k$$W^31n1 zk5APT-Kl40Vafp1dRRUsl!M66F#v7$vCxzJDE6oV(NqG3X0RsVlJej}1kQc1hAH#@ zE+WD`#_4Z{tq>a)g9Ew~qMLQ>SxjoE>iQ#8h!6{?^-#HO`WP2gTE4y`4zTz!d~0_V z<7DVJah98-{!1=Cdvui~$zlqvZ?LXFW|!37#iUp7Bsl%&wXI(mMw3@Mj|&CLO<3jN>i3~`rwe6IaU~ePk+7c z_`g&<($}!`>?WWVIG=fS)#7ACSwzFVjW8j{c-y`+uA*RBkPaV17%fZSPuH- zjQdDJg7f;K=iOCJrP`daKQC;t5vn`=klCw z9YU_rV!NME$4KJQ1pysTT2tccQREcgzk#m62wJ{yR27U-7M>WS>Gs(3Nvg%+#)^9A zqD=Ao+IM=tF^{Go5Je`toQST!vtL99zH1gB z6-5HLB7^eX`W+!0i0E7aMbnP4^67OAfP`4(duLWt8Z&7r>B=?4a(u`6L?~v;;V=AA zR4M_Ofi}Vw!H~)iz|beXotVd|0{gx zAW4AU{Z&KbcR`8sAvJxGUyHt%Le0i$OI|zOyadZqLT%fHcA_wjxN~MO^vopz(iGrS zoz}e6a#+fcL4WBoxxb?>R1j->KStJ}|K>0z(fOWsXZBWTRiEFb8HkOVvQFSLd_LHQ zL;T4}GnT!DlBZ}I zL-M{}fC!!9PWfK$qf=+C%|x}1Xd?hqZ?_w)aIND=c$B#XPnS;x3vT0{yqF_|Ys4cz z#^{jrVj>Y5Y$!WO$*LXuD 
zVr1>(OEZ=0=V?$#J>yv*lBY+wXX^89%p*iI&32knmLtp%*EEb`%=PYFC-lFRjf}dV ze69zrOz_QL5+pUw#Ob5B)9iA55nY60%M@Zgq?I&lM5HOUV;t=v<~KJ1VO>60R=n@M z1Z086nPqNl{nDraemD&HKWUprfv^?QQBZ~j-Jgpp{jKO8NSQAR2{YdV*JIECF+k40 zPcrg>T+cR1pSj5H7n#lx$5N>X92skVBOo{WqH@IMpMzHyp@|Qpti9ZB2yYhpmS3o7 zP~1;Byq0Z|$z}y#snW)=V6K?&;pXU}zl)-qK>91jCFL0n&FM!L3#PhSht3Z5k#5Yn zafldqq(VNJ&C>gci?SZS|KA!xXO2-LeGm^F3V_eug4D6r_w=qKM_C=ijBvR)z>;kA zxPG}aZ(EXeN62tRa4Flfh_n$myTxR?@;bvi-?NCUBGF4+jPGH6wiJJITkpy}0#?8F zl@y4EzwSMN=j9X&?R$#_Ux1^qdro_uf_4y+*#eJ3=2RuBVhfoir0m%D3ab^RjAxCd zK|p^pW}k#)q)fB=2Dib@R`I&V*@BwfE2nJI*o(}JA?S_ODjmfmJkRpO_~S|0fC2)i zA{sjBVC}*HVOC-M1PLHk0rTxSBmX%{=x9fvQ~Gv^+qc4w93T>_GoHVJ+bGU^z&cs2Q=I6WLg^tg7< zk0ct6Pfb}YNd+@(1?o~>rRNtYMR=rTzcLqQj&)~^*F1*J0Jo$FK>2MC$hi}C*jBE; z)WQfaSmmVL@F$yhRkjZO^@0YoiC7e1_{0I2%Qmwftq9#ILlVIKPCHwnQ85HF>a>O= zlUx&9pd|{;Ic@v$8BFQFmfv!gd-igaa;7`A1)>j=vF>I$3(u>IHu-}8T*YsB+a+7yu61^1|0NVwpaDe)WBb46uI~LBWj?@Cc@P4lKxv9UHc1iTY_ofwAq7=~mc4B5V+0GUf z7Vi*8yrQXko5>-}fP~;)vU#2UK=_1-DeIG_)5S6Xysyb#nd1az$X4MeWJqM8RA;5x zC#s#vhX>tZS<^7a&qe3}VJP)srbjd!_j+QW0c^dV)TVu**irkXQwD0G6JD@X{}>E6 zZR98*Mh9Zo&c`NM$Yp+=f670SNxEKW^a>AdD^St1d^D)PrKMM#2pQkVS>#pNnp5{1 zo3Kj&|A8DMC!_vagGFmvSi`1qJi7m8-`>T(xNH`9SDM}|5MBvEh>Y+OBVjI1TsCfk zQEJ}Uo;yX6nnwN-Sw`_s(UOuM?zz1+^S0K_&P1g7PIAy+-(P;xA=HJ*YifPBcGO4~_Dg&zw>)Qnad?s#-8VSvv;)x$`8N?~XI zYglVMVQ*Q>HhAZyz^GSmVBV%i^tVX6UBe*c)^F6FwwIqBb~iC6f`E})0jv?*j1EIZc{<%dJ?Ox?VV=K~ah zXSy*hZ~m@Gu$6|YwmXSKe8C+ww_;lO=JkbAObiA(hJLLNUqSrJ#CQ~LV=?JN@7(Y! z=uPDlk^{AsF!FPm4v@8iHwua$22iUrv1xOKhN0)!irXPLYko0~dGf4bc!JP7TWgke zgLUF%c3U0Cff~4`13R_Ya4nkutwp?}j1SR82C6g%#+gZqvz2|@b4>2DwQSoqhImy^ z7(eUAi!V-f_CX*?v3Inq*!IO|)u78zpLj3)cJ3rOUlehIYiW>S4owlewsTTFal$Jc z%{Elgpgm0Mx3$+Sk3}ze@@Roos;$A?#wBp7f>XgXd$@ZH;oapk-&3lk=HK{FMqbXE zhSg}tWTl+oVM>sKJ;^$_VLk;qInr#;DX!7As@A=$O>N)8sE(SC3n-=f4mh5?7N8p_ zpDhPAE$9c%AS$WH1_Eza7r-epl+{P`i{goW2vmd!1~*T#cJno$0X31S@H`{+*)tR- z)lXG2Gjj6f67`wbCwYk2lRtO$9Z-mp!zkz? 
z_E7Cp{-cY}biC5Q3VQsUMxa@Y5A0}5SR2Yi#jM1#H9^)xOCWQvzQ^;HBoDt!2* zI(JpEy~OxHhIT15ZUDkC%VY2dz1lRp;}t1z>QJaVVuY95kC5C(f+xPye?iai+zy_1 zfSf1HaBpH$pM6)xUf24w5|nh`E*%Bt5o*;VeVXa2DUe+J4D)6-hM;Lpj& z&QD?l=|}<*HZ4Bb4Ey#;KpcjvWs)BWnnrnY`$_B0@=2#aj;ZmN1tLkLrmSDIX}D;G z1Iomlvmy~ESAf_obf}vSDV2%5(?ve>{=_IS>p0{c@^uW%}C+FOPIsTe2YLJdUfSU$uNeGb-pdOIRSB;*jysAvg^- zO<>pu%V?ap+=Z2#dLN&f^rA0MoRuizm==^ZpG#`3ncmwLmAd07GEeHuTNU<^G|=fE+N{o=*HkCGDL0m4_J zdX7^B;kGJY&QfP8$=IwIn??wa_#t@Vg4C2lcb#W?f>U&>`^MxFl!EG3HeJ3v#UzLTmB-_7ml zO2c1s<|eAFDOU62Uw5h0Aqb=oCu>2S__yn}2pcHs%qm|Lh_Mdb7{A&u69IQe%}u_4 z`#v%!PX| zRt-o9h$78j4XWKtCs0gyhNvlfI+2N5123I6m1NszZNCyiet{c%muCOGNqis4wott> zHropvO??#(4MYV6SD;`(TZbCGAL44X_}G!@59SfuVrQDv@5`>dCotS8AEBX?Wg4T5 zoGX6x3CJjE(9i+f)M+i0N{o7Sr`fLNS?rZ}ieNHx#eK2gupowoGZ#IoWCobQ6AfV; z8j>l8;tEpni8ea?U#a1&Hq3T^k=p4n`vp~O+ZE`mZ}j1!(#NNUSxb;d!mg~>W{>X^ ztOc>CT+wLC)CiZ9&c=X)+Vx`}a51>Wx)k6=BOW+J8*{8S6kUXfHJuHVLls-L{{k%6 z##7;1XFZUC`8&oGfAKiud_>93pj$2GY^t!=0C+>Qik;wegXdx9`=^nM6&uc3@VU>w zgp=>6ariM^2gZ7y>E=m&4l32Xh$hB6@|@$#lScHzOR2XdLd1;f-ZcTpZ#A}3_~z*U zhP1K=h@iJ;>{6Q#F)mV$){m$#yyy^xhY z)}X_E!iGoM#Ete*PH|h2c*-hhBE2XCTq7rizuAzPW41!K;8^4$GbJ`?Z*9K8(KAft zuF*FTt$4S4iox1nn%#n#l~1218|Qp89x}4B7SbmHc*!uhX%}HESP8_BHGnO5RD)8( z+puA~gTx)zVESmKDToX+ZoN)|S^EuT+T`gv@bAKIPpNIvZ5f0(QLDIOS8jT{sKL)$ zl;8SpBkJNaK5U|+f^XAlg!T?`5;S^ZpzRa3s2RaO{u0M}f`E)qj-<}D3?dht4ex_$ z@@T{vmNg#7DEJ7R(1g#7-yQn*&z4!m>Miu!(k@Rj%?UsFF`i(vYW`~b-$jbaJuVC? 
zFY_+G+Us$yB9GaYr?+|-+MLf6eERVNcau27Blk6tj6pzkQ=QD3$$@u>U*JRjXVXcW z_n&GzT;?tPiA%eeu`AIFX!vkc&V&FX3w|ib+RcM*LsXyGMagI9J&qIMWQ_b1<`(hW zUl_uXWdQl)0XxI?!tRZektFU5C!b)z*RXVs+EEh)K-|A~-@*7|25EfI$8|J|bBl>W z&%6zrw}dh2$`~z$g$Pt$0Z~ICAvOK7a&t;WUac$Huwg3!EVY9>D9f@OTT!jCnm#4v zFmRgS;HwU~(sPt_GDMpP`99;T+>uq^UKuMc7Uw#AW%P^{C^lwk&6hxlT#E_XQp~dw zPaBpI4uZJAqqOCmX@ajvu}}d!8%(Rm!E{P-v;S_BU)0l3t|93W^L#Sc;IjyBgI#bv z>arCgd9kWqz(^A2A${yZrXQ4&yV}LFJ7^)lXyYniogf0(4IkSn*wGv(Z|+}C}4lqf|B(8LZZV15N~Z#d=Kk+}oKH?*f>>fL`v}g?z ze)$2!eShnc^(4TBODno&DU2}xamy%|R>nl4@1OC4d9XT=`ei9FNTOq~DN7FXbJjqQp$zzyleh7e38k9oEhWJiL7o-oDJwk)uYU0E zhGn}{Qi@v28lVtDjEzw^9#44|W8J-q2pTr?Y%|nz1wp#XsxuFTQzku8s!CK} z+yPAtLxrpGadK{%C;k~#FD(^_<8s<|>ZSk`#XE3=jZ{NhJKC=#?WaQVs6{f7R9W~$ zl4$pVGZoZ`h=d|$)I{;rIkLemGQ5LHL?Mv%kWG{CyWJ3q-@gmxtr5+&W4#U@G8otW zunCfW=gtS0s<83Wi@W?ih#S(BoEBLwMBaUq4URU7_6euhwS@cMM)A^ZJzD2PYzU_O zAlE;3F#_*7qg=tJej3k>K!w#w4=g5?b>TjOI*`iG-=pAM&!W&>4gE3vbAC-m_L%0J7}RJvS` zeywBr$1?sRijN$*c;(1e%|IYUc~8>MBnmMMwo`MrNj&#v_V?jAkhP{o`yl950f+qa z+;@>uRJ?RVl>I5D(|~0wNDrejPsc>3G}%E!97~PzbjqA9kGjVB9i*;1h(hvTxgyBk z0ZMl;LcEpvhUl5R@GGKlM&ya1s@gQ$;8tDFc*09mA7fA2wXAE1(y8EVhk%R-dMa;3 zP(d_E1@21=8Z3BLa1To2QE}NLmoiCQoQ3&iHg>1>uitCmEUvG7Wq|cB!^PclVissl zA=L%D>!Y6KKmi0)v&50DYvCO@shYH%L8I(S16fDDj^|f01U+aoG?uuq(0-$p0PsMF>V+L^dNtK`4gW6Z7JB+85 zP{fL9U!(nqqPKFjl>z{fxj$Rol4ly|aIlUJsd2|RE${RTV3kWjeUNZ`V-7lv*NwHm zNq-eqp4=wT3_iCDy1l?u&{lF0Gu$AK8sjcx9|87K7tDO%2|))fR8a214u3=3an`2N z-(|FY5{}`ilm5iQls71bACgsDlC^?Z}z0m0JqTMPm_V2g_ylZHuHjyCVRO){51Kit{V{ zaG>y{>m;cmhyaJzk0q=5geK#knSHF798=vQYu==&5Xh_z)o~;x!WyL|D{pCTdqdp) zrN8F%s-ImsWfLYCK9SdZ1c3IqC34G(*CC%_oifiq5FF0<<6yvy4I|bhuYvAe@OdbP=`Kc1a-z# zV^g#Giy?l^lzAce=TTky($ZQqlkRP=NL9^kd+i1~;CKk5co8qPsRMEql!^ z436;@0TZjpvbFZMRKcqK2O;ZZ;Hk8JuNh0E*6qpSYg7}u-kMTW4Ivx4X=pJUl+$KZ zJumQ(+boxqkKC6hzRx8704UMRe)M=N{xB8+IAF0XBpB_2Zad}`r3kzwp{v)TJ)aag zlV|_%6Id^Cw_1^?-Yi6RP)+-ZtJ~}OccE@y=!=Ah$d=cBX6W@*S_$mCAhb2Sz>K2h zcGb-$L}1hJcW7)V=NAgj96JuwQ78WpxGj(M{hqxn6~i}w%Cc`7={IXcnTvKP+a(i_ 
zavIya{;;PhK5W2{(U&ZXG*P8qC^u60(nDg`P7T>GUq4rAFn~=Evq^MTKx8?LEtfMR z_mS0DD2tCzgit8#365P z+t^#7H_>wE@6MW#`nBQu&X#x7lObbx@6;*meX)CgTXt`_CPUXcTy5|rwfccK5fu}MOTKAy z|A>aW_N!ki;*0Q{`*;i`1DGX6a>^EdkNc08u%@Y+joBNuSsj7{$i8BvT9ox}qzS>n z$+MJ^PIle#%s-68SZJW<R^&jJy?n}UPzl?U**3+ZzH1G-_`sHH$f`IWuW`ITB57LO@ zWF0h0}$Z%`ul_Tf+z?+WyYP0HT2q4I%!l%A5UT?5bAbi6FJG5ts$>R)|dY$K$bDimrPXwC}v~g45^jG^O zD&Fml+vqd4&6;l(c)qPZ?%h#ls{cvlNEddVG_ZXmKrEhqy$~cU+FQ%2o>b5}< z$cn+w1Q0o=W_CZ-{#uyo?e-!pn5W{q`(oGEIA)sEBMxKuGezPYHuFF_?Z=OMqB1c@ znY3blXG$bg1B}80y3#N|6jL+3Xo8PmjK^X+9;~-m4g^{X!F7agcE!FSsD97MStC=8 zv$_ioIr*+AN9u;LfqcR`vl8xDF5~(5?j7Y)xdF|J(b2eh0x~pI0x3`pts`CAdYf0L zTfxdh+n@P4Y^eGw<2{fdCVp;*9|}xgj_UG2+iN`A%7?f#+f=inRQaFE`-BUUuo}5p zn@v*##XPZ~fL2b0muLp2Ud(0F%vZ-(Y1cL`DLYY*xkhGt-SpW5=zwExk%s>nPxCzX zDqvSsZ~K*+m~kLFI_@BE}<-T zPMK?6A&=UdykeXpIpM+%>3V;^KPYKwqDIP8>QMN27K;e0Mc@NKU{*Aq^1_8Lbs{#h zY&S2URT>NaQ9tKdYJD(0o}6 zP1CkxJhSF(Zn(S`2}?=44pg{C817AkM%b%3|Iq?CKAg#k- zvCc-(1bGt#zau$nyFx4J?4F}Dco}HV{Dw9F(rG>I_)KKPiA_mCc=SQFv;b7;!Y%$HPXY)S!Y)UTh%2Q>pVJuVe^1QdPwT+ z0w8@!g$sWxm)pOv)B>pg;Y;EzTHcWE&JpT`AOBjD-wjsM0W@6;?P_W%s4p3ar{+Kw zOOfQT5cHQzcl|th%<2YRwOhRw>Z6~$&BdGIIku(C5vp@bF4;Op`V0^sBN89V)2g2N zz4SwmL;vQcdQkX9Ipz`0jnczNWAY>es?x^!TeHUR(TIbG^e=%uD)xFFf5VkL+I83b~$%|iJ{tl<}c=BS1D-&*`+9` z#!h?2SMex+^95$SyytJJVo%lT_SUIZqtPg6QsSPp0wV%SFTNTMA9_ExhT4m61vJ&w zdd4fs#|e5tq;RvfTs5J3-r&zOxHMOAZKFaOp44|d?}XO?CU2TU+r;PUeq|iah+_X5 zhw5af`1VWwfRqH<`Bv8IfxWUiw6b^q*-4jkc_T&p8WpPs(u3J(Jc@I&p;Sn)1-D^} zjf?S(V9o0*A0|E@EMt~gtJnp5228EYFE?xq9kCQGwat{sxp%}ZC|ktD;ty@@Wmu&j zkb9gaxq*1bzieg0x>*mKMng*1NJeZRb3}5He%m;>>xr~eicP-|M|}nfpsmdB`GyUm z7akzer~9WOhpDY=T&+?mLI^b=fN8+}HP`kIE1(1@cR4ux-%Z*2OJH>LGUwI0x`seF zJY`Q8>D&-DE<5BG8iAq4URUepz%2*e4oRm)W^Q@&&iP9uJgEeZ9Ubs5p#1tK@+rS` zIjYo9mEvyUoYveHiYT-(zSM`B^_I0NhnxMW|HEBeFm9rm4MuDULq@$Vs}PBn%&isI z_`+Ev{+)K#;W2jzGEyJr$P}-0Hy}M_1qdkqJ7UEw6hYOMx!+ZYv2d-2x*6>f| z(*b%7b$>jHSf&Jcf;#aeJ+MX%^x%A7!Fn4i-4QsVl=T%f2->O+^bZ3#Mr_Nd8Qy7M zyJ(W8Mz-M?JyY=_*LaBI2$qSQGs0pixiO}3U^u9H*rk2`X!O{B{dE7hqd>D 
z#o3kN&g(gDa0Uwb5II<=&E@>mofP#u3EJA?j0XbVaQQ07sbe3!JRUOuk(8-&TPfeF z;q<~r#KU+#{%#y>Tl63Kdj$K1r#RM<=mb$ronyG|ES z!?uRh;=Co=Y>q?Xiz)t=3X_}v#zF|+Ozz8yVpXlOMfcf?*R${J&85|!eB<5GLgN>E zz2QeZTYG7x{tRcbU!wayVf_zP3LhvS#4qY2qg2eH6N*{dm8OKhR2nM8Nd-W$9B@At zu}#4DlpJ>30q70TvN^I+14uKHsE>m2$pA+B(-b|@_SH7r;hbE{yb>S@)d7H(Lr0%8 zXrEmuoY0QKjw;^M6i8OJEJBDT!Qvp;aawJ#ZwFsLqj{gg@?&!9mc_W<%*2UXUiD#<*exzEVUOvmOItoNPZcCj_mX>H z!&_LiMo+5k_TwVcUemQJs?qwSh2Bg>ZhaKXV;{QB9T)QCf`v<@4nbD4wB7IT!b7a! zz}|~9it{#lRLHHr`_zm9Bmuw!)H?<*jzX;8*oCPy55~jNOu!MSf+ON7L(w1Ab4-uK z6m`UETvSwI*5aXi6Ltein21q$yapyL=k)upH6uBcJAE9Oo)-WG$^xsQ6SobUTZ44) zFw^-L!&JGXcUsL1-c@x5>|?X3$A-S)-^zweIw$L8PH$~f;Hnl5qohgKg-Ri8`js2W zYIYzH-c;B8^kBt!68wchcr*m4Rvfo=WI0U1RAL#irB{=Vo|7W#3qwEU=69yprBRl` z7BKrm*-}uNT(Wp9N!Y0>C*w*NJ3)UX{ET3KRt|iMw0{Q*%j02?Ffqu+VF358rf5u4 z@Fg>;8!QuS;Zvw^nXu}<*peE;Jbvf9WQ=Z5XCdq)Fb0<2!{vZCX4lf?ye8>P_kF_< zGoe=Xl{4XC6^j;CV5XiPpDpmw6;Tm=V}K1%+L8`e3BRh_Mc#%>bhJjO{ zcxi{K352m^<0_iGAXirlozk|WwDU)r&JhB}fO&)vpxFQ2>dzd8zHK-8`c-Hu0<}Nu<(Yw(z!`&D}%&G2m41Zz55moQGMf_ zekWa4Qif?O1hkvO?osp^=q#ABx;55#%4XcZqsFmZXVrFbXHqpwD8FFq#G1J-cNH$< zj9ghwA0O*bWxTZic=F41>f*g!q9&^cN|sWtx$}zlg{`GXp{C@8xX=SdRJaU-^D~;e zNRd=sm27TEo=l=>wQ$$tK^j^9Sz+7AI_l_2wjNK+(JerT6aLH`25tHCQ-yT4CWK zwrm~nxU>AIpr+kJyS1cugaQoDc*$p*xxHEfHZCbV>O~ez2^7QKQ;v~5^a&{Z>2)#{ z8;oE8lhz(2SxpG|hTne9n1GaCOxe( zx!Z-mIsGk>`Y4%c!HuRt_(z>;p0v8l$@OsZ5w}Zuie$f+Y>pxNmU!`|k5JU7D2aFo zaJ4=cdS%3Dwg=b?UdrA@C+xLO64!BZ{0=`I0FvE{ad<(Jt6I| zpO-1?hB*A8H>vdBsyOZ!OcayrC0v>mEDG^T?<_ODzS6(W;z(9yo`Zr8J-(7<6laTx z*>1I6y|Z^joY;e78DW7B$b6G-5WX91>a%x-~R-rHDY8cVZc^C948!abkcUUAgRvA zzWvbwMni3~chvL5;p)EU`>C~eG{cr}_xvUV(B-qV=K>5dG95R};T1F3~kMMm}E?#S*iVDF1e_}pG@HM#tDcdIm zE3pDUkZi_O>YV$SB!z1C|CDcl^bNRzyYle>~WYdixqwqV68i(v&X*F7x-E@g@=;R$x= zNUnmToimuN#L_kMJ|?ZNLvKI4xh(N{tuTlxAfy@|{gov|>bpD(8mSR(JN5NTyLnDs zm;6wFxhoa+SOA$nzTDi_Cyc`qY+aI=ju_FC+RuY{$+QANlcTF~()8LTl&^lv=Wz83 zG$#Z05&dQaOFE#6VTlSFf;m(^^J2KCU@|%@W?8z$ytpmJrwQa^Ou2YOy<>lOJlv-;MGS{% 
z-qj%?C9tlfA}Aa@2~=}QEi!BSy*^m5e>H1Tksv8-Lx%5h%rPs>Ask0tVf^OLqRhpD zc~v}bb}nFkuI=7te^QyFd0JZ^e2R^-OYdFqF5Cdsa!f<6Zn4HSql^)76p~~v%9%%` zxV=LD%1sScSHj6g9M?N@mD0HyLQ>{l7Am+r=qBVlfG{N1pLaGfr=C#k^4uot6*Vk( zVL14v^m*o(IeUMznI~^yhgmJ~86g0V!W8i;$y`2v(zmO`99M|j)h4X2aukL$Mq4CW z$XZB0*%4S488E{jskTdga=2T(=MN2@v&fHS&-(S`&8v?LxNG(DuxlEZfZ+m64UdyU z!P*+TZb0>p2Ch0fG=VmK!2h+{)azc)vMuJ<%{Trk#_L2OONiBs$?FOZ8>`L#BCFg| znV|F`O^Q@ZwoEI80DX5hP}cQ*>a;TJuL6qB0;+W+Ph3fBDH*yiC-}KZ*L~!Z5?dc&|c7uD}nz05SopaJ{Q_Q@R+2Nb4$F`+%^y2fr+^{4pz8 zt$^tV*CH=vc%Fa^h!!AnT6)A1U6G$cGvDcLt^tA{)okkPU4HYt9)qj3D@8q+33;n! ztOhm>CFI>$OLW1T7*KXTw(BLjk^hSgGRd2obh;@{>Q#7BkNCNOR_o7Y2aGDM(gAEQk|tYgcG54iDRfk8YtKl zeGbcosclQz*=kbc zKOzm3g1P#g4Vh_NFRnEl!i9!IX zTU=+3sCxs5&kX0gv7^rFoA_cw6vr;bN(Zv{n+{=uub3?pJ_ty6aFO$%Uhxpm&zM=go)(FBCK!cv-u&D}n4 z`T}kP8%RwEvTq`-$Wkremj!FduJ^TGjOW%gXsi~W%!Mtc@E|Yuf-T$j+f$~d;>!G#>$aT3NKgwKsLbe zH|RjOS#%l)D@vJ>#lAr3ByMVstrS-{rPjUY6QcQSDZZBx_t3!9F#RZgXwD|rBICYW zdYm_(Sm3KzZ8#+AE7!ATo_pWY^E~6@$}X2^&w7{Id3BUhbGV1QGYud!%hpv0SMr7p zg5WT(kn&?ZWkXOyJ6(_#t~TUX@P&1$`}^X#4=jz(@Z2}O!y(Z1+L8nuV83 zs2OR=hXTgBsfJye8r~hR8=d!TN|@?C4hd!D+9x519#&${o8s~up4r}AQgxt)9I7NJ zstyM3yP(%e#V$)0lZBr3^ak=_ONS~Fa6ZUSlk z>F4+?*>Q6v-`+|Ig8w?ds9dySa~7Dpmr}L2_hi4?He%{D!CAL!4}d`GRcD~w%T7I* zv0nX#q>z$fgOQ@PpKhg{wzIy!tndYsz4a82wX=K8bm2XaPJek5Jd1*2itr8O4XGp! 
zcj-MGW1{SCHhZd4 zWX>~O4Vf#dfatCFH2Kobh>W+qE=%{`W9u^Q35pUu)IUx4K-3bJ+@zbY>rMuUQf$(A zLCmXo#8%K$m|v&KdQTdk&o<(Zxh6?DfOGV3|71$#oVs0uC||r!lx^=hK~@8_FtDcG z@C%O7yip|s&=SlfNGTF}KF>Y0@KD#~ZMGV2#XWXmfp$3c+YUk3UGd?lK}?De8JXi% z20kAIw}6iqWVI(L>~2nB5<+pUp5|kx>Z74Emh}ikSFno33`FwENHKkj0@Hx2vy;Qs_zSvD zGOqfSfaz0?&;T2s)f7!~s%v=sdufMXt<5@En*+6^NS$KL-x`HZ#qHFy(*{wl8G00| z&7V{|{J0eY2mb6=17@JtkN?7guQ_WOu8?(rztWlYdx zKF_BO$$)~U>fQCBD6ngB!fG3XNj?y&3yb8(ququ?*a6zDF?@12Z>qW5J?~@&n-@&o z?naSy&$Qjw_jl76lyyrwx1DHf+d__R3D)XS<#`GdZl>8mi6C8K=+&z(e?qQRS_As} zXggFEoX5n{P_{!vIt*ZZHQ0$szFrqjb){P0lP(SUV~c>kvKj0UmX0JO!rEgfdz~4E z4>T-4cs_tl8!X3(0GVMHu+cyHnuKBHTCWZ*vs|Xv-Yf18`w4%P3SMsa@WbVj+@G3_ z>6dKb%1p$=)iQ`e6ADq<{03ygR63#hI>LiXY3#;X-<|9t{T$$=yLF@f12;A)S{T(U zeAbi^=OZYG=VAIJg8y$rb~^0CS9BUgwptc_DqL=yix@b@j&`-JzVz#g=V+9Ogtkvsi4eC#CP> zqpxq1bblLG=a{Oq1&Eut@3$E_Fr;kWTQH`U-agfTEg1&h%NN!!d#9o)kBGH_iAZrZ zE%^ulPT}hS$$f^nEzZ(kU}!J1W&q#~%6OuhqIXO!Ewn;L<1d$deh8boTE);wu$h|8 zW(BGF(+ejZ;?hx?pl%4K6D^%j0$xkB|K7!Z#dB!7>oH%hOzAOX$WlJSNUNwRae|Ae z8&6w%nVEe`0u()R{EW&l6HBE`N>7yvQ1P{wD#hRO0vD|qz=Q9gIJbcc2Yn8v?d0h{ znz20D`=aDRKMC@q!#DDqhrkb7N{Q9g=z16$h5|3N7c&s%A%pJ?Y1cg)q0tD&EBJR+kuhtPf zL~JJkaDFTk+G@v$(rj6GAxYb;4i-DCRID*#_p)pmq`#z{Lux&cwB%@$?LN^uZ1#@@ zq#JCY$d~3JikBCXC03Q+@gf$3u}4vNa=_M|WI= z{_CeTSJ{l*An1Ll=ZRC*@Hk`UPyb2b`kfpnQ>2IG$Xb7|bTlL|{|KZ9j1M%gp{fUo z3>##FavfBlUt+png=M!8(nd(g*7@3zeXZW6Sf3jJvd$vHu&kRkGMA*?(QOC8A&u7k zYngNLL#)9mMt7uCBgZruZP3Z#Aq>@&O{m)+B2n%pEq&jwY)C=fmiGni=0ObFh~SyZNUrs3?W-X?O)dBI1D&=nW|nePlrFpoiuj z!kQEM+0;*3R)~8pCbHRfVvxk?-N8gMfeRj;n5nfl@kyDb&V;4aC|*7WBp$T} zs|)i;r@928$PC7snb7Cu2TJ><6O);Lt&7Oy+*xBZS ztW|DX|2i6<(z1kFU=j#k;Y-RmlO0A8f#O#$_D}7SPzvr&bltkUCOL+FBMfH*+l{az zwp@($ofq|~=(W`Ss^qb>o>Ar9&GYa_)EKc5vc|@F_E`Fz+B%smDLUV5?2gn9z*idfkZ{Y zA$WbZTY$=FQy0;%lgdEVT&!W*rxS-a z&ai*pf-j#J;-`;i7zeui6eP3OrmmrMq*CAn$FTqJnUN~{_SnlhKBNPIE)&=s>(Nt4 zb!xilMy8~GfGh_8na3hxV1UU3cot(zk&5}c$xoaR5HFsX7$dr0{#I@GyBhl&?QNDN zg17e0cGaXwe zsX&|uwuh7@QR)_~ z4vd(D4wSIO_@+%O%&I6VHQA_FIa---ljdtM|0)K=Fw~sAcb@s|J?~C^KH%*ItH#&} 
z9mMFSa(SC@wJzTT+r#eSsVH!Bqi(;?1v#0wFOpHy?bf+*@mZz2LpfojDVU!d;p~IV zg=}AY2?l$R{EUpp>mEqSE75XZ?F@+<3+YNO!DvMDCXM(x4{cl+4*^>lNgbAm`*{*H zKK{k=pyN+&ppQ)|VmaXwSeE+Iq7%UJpj#>zfKDmck9h#q|M4~uw~F~+PHu3ir^^wS zZDIZ3j$L*Wc*nZVqifyUP9n%!N0A^%FI~n41W+T_*MEd_laIs1NH;<)7AQpeWy$3~ zbC!@=_SI?raBbBKN4|l95OxF9x@v==5#=<7OR`v`_K!RCHXpU>#&6$G4k;yqf~c<> z{k!j1WmTwQ47n)U4UJnVoHtGB(JR!mur_@GeuI4n{f{di0wv5lQm)m7GGWd-`QwCn zNei$y%^V}=d8fr>W71vIP4siGVuxx;AT=UrIHIRE5?_m|o;NCIGL$0kJAd{=*-9&f zD45e}02UYuDcVjjv^M>`d9<)4W7M*$OuIx+*>qIgQk#nu_d_O`Pn1yKMGXbXtE`%= zXn@`Gg@~!4ax!E9#g)|JjklRcLbLSSRN9yVS{Vn?0dT2wESASv?QeCckS{LAv#ZLt z3R0p}tmxg;dPE(?9G#b(%zPkG5A-d}2h6Bl3%Fn)OYTqOqMswHu#|H=ICqOu)(Os6 zy|bGg>MQUj_glP+hpCp2@{=7Cl7yilVZo#vdFqV`r$Lo6Ri_d0_u8@Vj|Nc=W8zrB z*bUrHQ}Qz|hPmvOvSLMxR4fA9HXot?LYczYR)RrGVlRKG0cjj>TEaEp-XaZzcKKS< z8yvf(uzAhs7h)1OIg_Q=vc`?Z(EieTWD|z;R(Bwpx@rsGr5d~CWtqL@p_D=6xjbts zRO8iAhzqfC(Hom;5E18VEJLP8$%>8j&KP4FPvk4VWk6I`5;^LwC-S^cbo2WLWHqW> z-|r$^#EyzC9Ctf*9+PrW{QEFPUiDIo8Dz+iAxLQ!;gi@M2yu*u;@Lg9pKvOy2osQXyw9E(zF5{N2!r?2oaM}d}Dm7^>0 zmd)LBkEDQNQfFnN9`{HQn1c7dQRgGeGbYip^+oKJLy zA&j_A$mEK^xuJ7R3;2VvaV7kba*Xc9>Zk6|+ zWSA4s@S`0{hL=`Lbh4}aLLGN5gNlsHl<9N&9+$nkjNl8?0fGDB_mP3)(mFE8SDKuY zsn}iEwbVsh4k@)+FpwJdsp_{ru6>*|@gQ52wh=k^Lrv~efL)AYB(e=PhTat-?{Pc* zmH;+D$-mqdeaj*DffXsR3spS!qQe+5NB$3zo893wtL4<4l{e^RM5;e6&o|D8URn0j zg9c6YpPlsMeAkXv3@z4GiL-VjR5!>Sz>XR$YYs$fLx>B<6D zSSr3C+}|Gx1LtK66`(~Pxh*(Qf-SS}4=-4-daxjQPaGKzY>OByx}ID-5z6gQWI!>) z+-UZ`p|#(*%K0PI5MGHMdME_P!y1i~=0FDpHicW`^M>cJ|4<&UXSn6KB+HN!C*TuE zFD!GYLRLVVt1W_IJgTf$numyt+QT5uqsx$gh)Rr|QNdl#ZZ2(zoO5qA?%R)hz(i^? 
za~8vsySQ}Fca|A*2WFLEOeC!cbo$us5+`<;eC-tV@M*Er`Q2QMSQ9;^_F+ zhIslNoDDfGoa2|7MP4@?F#7oLAbXsEgxV&kyF)1M(Qgs6bH}WrCKRFjM-DHRowPbz zy?-bg*Lnt2+yQal_PW4U=rB5LdiHivce>;k)Gm*Q90G@Ykj3&`RS6f(BgX9`e{n^b zs+p#`jD#({I`4Jbr)2>R&Ji!#qIOdg*k-C(X~14BVX+OlKx<&vbH_r>Hc+ddA_nif z*rI+se2O3yK~C861mKWt!<`((o_}?J9I$#63{o9T_;*_l+a!JJmDlXQPO%-EGh5V2 zA%X!K|+hYC35&v{xbHDor`} zbh^3S5||lt!H7eX?|b?9t66Y(>eh8Tw;qfs?JqiFQ*&*rcFsJDJz2MiE4GTO7dg$= zXa#er%0rAbxp(bGiG_aRy%BxTByWGXTpR?$CkBhQ zHD#sQs?;{1e3}sds*Vxg1J(D=2aEc?YF{-DM`fjw%qolZ+U?pdO6r|USk;Y@?xOxn zNJwTo>xEsHT<~kWHvD5+cx8!VQo;KldDtq=rgtoS+GFf0x%t-8X0O;sR7KGFaq!_o_fsr9~n9xl3t%P^9gP~{2is;_BZbTJc5IIiJ zZ{N18=??-487-p{LYf47B3YqWzAn~Lpch_% z;eecV0Ln>67EH1g=R)7;g!fTI+^_K%FWTXOZJvvGSRIuvVn|udx~r#4LNrN6CDYUB z0kPilWO-9S-pUKo)s?SQ%-UoP(b2L$`NQ(XBua`!QGod*+$>Y_fy0V{C=B44FZGX6Z;HNaP-;Awq#6FhJ$o!5A8{|8CVVG2(QMBF1xWZ*Ap;1 z_LeWco}6eV(N?-v>6BsZaRnA;J|IP?*gW^W5iKaG_;1hqiOABmz&E z>oiORF@gDdMkb@nzPKI#;Ut4bt+d#!SN)MHVEYXa%cPxahvH(Z4ITfH@>II-90ZD- z$GN8F^dZrfoMifIYZ9Y|*3paZXV0_s7Pzql(T>R_t7pQ>DTwCjUZ^f+5kFaN^atwz zd{$wekPO9uU}9<`OAttS@dEPVH=Rh&7>)&Q zczJ^Z-i){Z^%>0zZsxd-1HYNUV}BACu$p#5<1gU@Y+@oePk@+7?ee2`daE_$KD$dX zYbOpH!Zbl;NGORLuJ8vqZ{YNMPqE;}blNWj_Fl5?IRFG$mJiT6hrYFAtl+inERET2 zqPvUDd2Q8?bM|EuZHt$L@jr40(0Bo+YtnH^+WSt*8zGhtvAC9JIA^=Uk%V*i1H(~8 z^1bt0(!f5$hd3jln}zU#4nPh`yZQ$Y$2g8$>WAS7yXA@?ty-GDlStWzaNNpz`r0Bx zyC;DouGSiLO|`{TSA^+M58SgPN%(b7%Y7yKmwlxFJsB)hoTC zmZOQhUyII#5(XR%9*!rkEc^Q2ljnivEPU@ zU1Pk+c*PZeyq4?NFe)CKx0=5*o+Y9nJdvi?o?YkR@-?VWq|-a$5rp!32IALGQlpE+ z5J+dSuGk-TwIbNz>cVX{X9e${FKgvTUf##D5T=H_Zl?)m*$>qsxhI3msze7`&U71_ zX@)u*dP|+}lI8)lg%$P3xCS)Ol|upHm4b?fk=FlxUglJmHu4XhTY+jdA*{DfNtKh* zyYguLz< zg|wubF|Bgc=^Uv~i7P_B@%m{^_Ia+?zv%N^(KHu%&87oLnf`lWkL_5)G_nrz(P>Pz z)#yn~o0h_D%7Pp!gClF?QS}o=lpPCF@KzMHxonmQzCL^lwnw$mx%{V=p@_uOz52m7 z1_vQr*rbk~$?lp<-9!0;35Y&}(hm4T3>Uv~p$;xmlH1O!9Zf8@{eS#RHyuyibwKQ2 zl;_}{pem`KEPuBX*v3{KdD~kacM78(cdv;;xL!cgs#5ZtyiS( z|2AoMVJkjC)V041hWKat-hK$Bj+H03?d1o$M|9=ESz=(`0ZD%!dNmHra_5tF`hHLz 
z52tk0QaT2#d?R&O5t&xf#Li$!s6Nae7mQ+l`DdP~pgDe^ycqxZ?mGyFt1L#~;Me2< zMlv%4Id*}__Lo0*=lyoJb#sYBk01U8G37sc;QN*nla0=wfF#Lu!$N?G)WhIFWxNnN zMaz7%St1+13ZUzR6$#(oIcFjV{j0by3Dgw9 zt$4c*rXoKn55RbX8i3cbwbYEIK?ue^W)AVqG4J@^88qs-ow3VFg^)CY|10nR0M^w~ z(fwN)I7QOct|JbvUAN3u->i{O3rAeE;^SRu&L}d6F$Og?Hyv*t1E+GkFIy5`-R>XE z>iC1_YSj@Zc(X|8p6(pJqnAPH4Em`x>Jvy0`1s^td7X>N^$EuTaXTsWA*S~F3sETa z@sZt|q98JWIdX${2+!+0Y5LduiqyCpD9TyW+TP4y>k}gB9%_0EiE8U3ig`KAlkhdF zv;cWtV)_0JMBZQr##mG{oexfuY%;q^gqQq)-pSoKFjPAU4fJIu1wPYuIYv1gAKuSEmFi459w zKUKu~+$`nU^!CiPBr$Z(Ettavb0#tcNEa5NUu}>fGSl4M(nLqZ;HSQ$jVM09%#EL} zrCbb3D*y-6)5UJ?%YG~Z)K{bp#SIUUw|xbhG`vJ);d>?vT=lTcT*L7MP z!3RVJevKIcr1l$9+<0$O)>Lr<)~(X6PMv@TP6}tpnd;MdBX8Tj$Ho;4&wZ^)&w{sw zp#yECzXPX3@&V`RIE|?G&AgMoOQWWu!ez_f%GXvi9m0TvuJV279t95ePe%uxR5*JC z<(IJkX$t|4IjxXhmit#3Ak~}G$k=Af+J^C=0<-Nn?HV%JPkQPIaK~+4Nqm>oudX3G zW;)ju`z95~N`*l}xn}RFGc6Db&LVn49}?fn*_P`cFzmnk^f0?NsKgklR;+p6|EtR~ zB(Nwi)$k*IA%%>8|6k7n|0BL-ax)@~r=ZAElsEEYNLb-*hgJZusK{iIs?Q0Q&TJxj zWAvT$f`mG%?+?Dp(H-_RL5sYC3^NC*pPeA~=2WcQ zu;c9;N@+8GwTDI8@a@{Xt_(($5d%DmGX4hhWr_59Km%CBQPN1jPd2ECOp@xHN`>T#S(?v zBt8|IlcOjCq^K1J*YZ*srt z&LtBYyfVFPcib@~XrTP&gqrgPgf&ExLWz=JrgD$vhOy0;qpJwMelb1CPCtzX=-E`^qApSsw z51&(c>8w%;JaFFkr=Yy%KBcshZ8nEpmkv#OORs76m62pbIu3bYV(3_WRn!^L&N!ij zt8Z=PtDw@O+xFI|SCtT4i`vsjoETpmEv05`CG_y&$+C}EJ?aWMEC%=Z z_kN8Yi0t}&gmzNRlR<%cA!>>s9dia_aTqEa=x<+s3V`*89w2yr?)GAT7~nv$JYxWE z*Po{?goR(~s{`o7Ik()jaBRmqEiUFwQV7fr0UP@hyXWbr;_l5J_H43L)@aT&i%pa+ zQdw%>vXa$j_2hIW4=hw?!e#6IiSl8kQo@KJ+ZOkxU)fjTrHP2H)|G5Mh94#wt&yW* z(EM&2r*5m!T~A7a5e_^VgPT-sUrJoOFcItXthjMIOsckA1VjWy!O}sTQ+*rD-^ecv zJd{R>pW7ed$hVOU;D;PiYzM5tG8f_T)etU|1=w2@03v!Qo>LrK82)3uUcv7r_U9St z45C}o7OV)If1{V}lY^!iVPUtFxmrM`({&D%NJ!+s_W@%}E4EfaTg5Ur`mM3H$g73> z1({<~O{PR`ryTt1nxhk3fEENTU?}gtapIxF5uj-lXt=@q;tV(#I^S}7?3bRs@y$Pi zmPxys5vRUtYog;ozokm<)`U)4X3oSJYJAA4DN;Jyus~NHm2d$!#Ajvse@hux$im@m zj&czk*35Y9)6i#YU96BPCEo(WO;cPTNR@&VK>9xz<@4X$Sa?_FRwvIgOho50s3ozlkX}d5d-Cz3fBqR^S_DqgPy?@LT32^hVStSJNgttz^kHlat 
zChq;k*~Na=;G~SASXXQpks+|RzE0#V!6Z_Q5hkVMkocjntmtb`(|D6!riXI1vnQ9(^i*I&6|qWI7wv!% z2&XVJXdGVeAv|`%lPTX;DBwiYsFCi24EoWOZ>}(opecB@d>sW_5Ut^!hR8u)$D!%sg$&G0b|$ zLf~aRqnvWe?LXE>9m)^E2UhN^=1TF9dx=$V#BKES_7Ft+^oI#V%B;c;)CDQ;Vli<~ z8d`oiT`t>t_9`v?u)Urz9mWCF*n@qxBS2B9laeGfja`n)Ww_cdF<-ft)j9AQeE$oF zt3KtSvwJf+KqFlV@=3veRl*uvDnejp-@2ok?IJ8wg3@F)X###g-I~43^RDXx zNXcsa%O)aiKmW#!hgv+(HB@JKB-N0|(q)=kZB9x8nVSIZ{z}UsgIsW7{6g%Oy zKx%mllq0VZXWj4gMxX@`C`U$}Ka7~0E$fDKvSwG!Ws~{sDa)U{!0A8r_h}Wb3(Wo= zepx_9==HCeg>urDEcEiHR{B&}W%Dxsg6-Ux>frtkPb7hQE0tFMYCoIri zAYnRFB<^$EMd@KrLZW9k&Hz5KD<;PO5GY&a?Q5&s;e>`dT8jg$7yg6JY|j!zU3w9m z^lhh4r4P-TMx|wLvlF&>MJ_-#9zx8b;26|2I7@UC?L?+9=pqk3&~s6uXS8{u;9w{{ ztW$!TRCH@H3uxl7lnne989)wPmOhKK0tlgW>M8&jTKw6h@XPzil}uKKqdVd%=Q>lP zw31l0R>cK@90!j203PQm89*l0Q-ZM`GaHL2n{=y*rnaPSfM;0_9LQ-Ue@QpXVQ^Ji z=i4n!ZcCT#zg=(1EWfyJuCH2!k9)KI{QPL@ZMnp8ipJ&6IGa-OV(A?w)b^5=O7VjQ znE!8*Cld8PKCut)r92omf%WOgY+!>pAPK~BwM)*D!jUkRO7blbOGCZ`eME-eJ~^U` zaS}F271uU{P)liFSSd?}>MnxVfTTZ7+)@^9Y*w2-2{?3Rr^ZF#j~ zglN`}O)nM*<0RfnGQFGXF+d()cn}y z|2ZhE!@MWnNc>K{i=p>Ui=Nd)jO(d5-%%r|syCmHFB8XfL6z0{=$L);3#m@iY|_YW z;G*G$A_(EJx01ltRjwkcrRX5L<_jReOz^Q3bT)eEV%|5#T14O)eQan!Bz6YWc!Qp= zG({ac!O6Z1xxq>+B8K)V=Rmfr=nsdb80d#i2qFmb!bmkvnL64p$2o_@Te&(Vuv!f6sMrX8= zom&g62RTO6pYUlIhmtcv?84pjoGn78Gl5?TfLMJ7gMgy1Ga8&E>e z-W=0A+A^(t$>y3BTKPae_)S}wD-}H&*WE0T)yD7T=9C&kgB|sm>Q>?Ps#9%KWy&>my3z=L@y_ zaWUl0Mn*KnL__^lb<|&c(Afa$5_D!EP@_nik3!Z^m!6RSl3c#{TDGMC&bBylzsD1G z{bEu>rVAF)mNGUaAurXNQP(f;aG6^hgkA{|GY|e48*+vN$qgu8cBV44Q2MYF%X_^p zqep!FR84$5K&*L28wx=1sGoDp$GpZ$(`g>ot~f)HjK97dyKXN4X%yK9GN&VGY(1IuD-lNBOkSAs(mb-dFMf`{|xHF%60PJf%`A%P8aq4xvgGv ze){m*yWd#1Lun&)cN{_vEgFt^s;8s73HeOQ&PYNgDvsjT10K{0lAw)z3~xbXHge8n zdi(_yJv%KZBz+I}DYOh%`k>D~(bKl?R|dKiAQWi{D1YBB9WDalR?W%VOoMgKYcf|L zL`88FN|1;9i=l$#{O|-RN4Kd+m{l<5_H+u&AkAbrZ@$CztB2F1a1Cg%-!8TIYdE#E zPmcNwMJS&-m@3a7}@fd7h0TxRnIL5mCr!5|1eN>C-#$h1)o6V@O}S6 zjQ<2ydJYz@GN$pruv&f{@0oR}Qy{!QYfR_Ifje#fA@FlK4$y1LL2*m-l?E);V9}Dm zQ6%FN)LMGCf|PIYPODp3eBqjDofJiJP^kxdOHi~Dk2P%cpgH_>uj@O^E+jwia}_(V 
z3v6vfI6geEdlZ0Lfs?J!?yl2qbCD@Q)h`z&@*e83T#E$ZuW?C>u|YPi0 z$eyUzK@T@>L{5wCv5w4eBTTJ!9V_&p7;^+SVNj3j7!#S-j{0 z;+Nb;mU7&G$-2XV8ImY_&5yU7a^qdd4F%r7RIwR!1u7R|De!T|<3wk0o$*F|m8tsQ zPO|vzm0tmFH`&A{!^@}hL9kQV^d7)DLlBYHIC0w(&LtM@KF^as^N7|VSw%EI3`rBo* z@?q??kF@4L=HZ9~H{e%gYlT`qmT)g`uH`x8@~7`uC3mvOI(9k}sY2%qV(D!Amb~uq z&Q%d5Z0@h+zGCzhL-e!c33?8#mtnWsx6N^m*KoMF(g=?3+S`&_xdru9W4=TTLe4!Q zatBYTD_HYPpC&IMR5I$@UH%**J}IG<4~DLhvw~FNwKp0M0WB&Y`fRrv($MPu{0%lY z3lUqUSd4#+Jeo;|!lrcoK*P(Ngz_Qjdq-9ijZLYCEKhx9U2|t|jcR zz&Jx(E}|_1dsKk!;yEIP){r$u88J}gHoH3JPoyXT&7Uh1)F7vdvm+$POfgpQ8`_Bg z0<9N-OfEEU1d=)2pFyF|h`|dy-?fX(D&JNrmx}SP_19*MdV$B;fA`dpb-MoVpJcC! zypRYTQ{Kq~;_eLt#zI_Pb(}R|b&{ZTK^f)*!U5J@F)o8B&ESVH6BKd(gGOkx4KE_N zMBMBmpHrSfZgt~Aus~F+oDIVr8LeQeA2|4hJlks<_9pLt8G!>5g8=%)L96!<_+5!=kg-f^( zlg?2=YG+uU!ZQs|+|l9s1r(fdix6E5L)q#V^KqYiX<}_vH&njBcjWcRCe2Bv}e-R|w?agH0Z~bos?Qn&6y-e~BgbFtX z8;c>gL+0eO1feyTi=J$vh5@l`H3;H~LO(?)PZ1Q~Y49YGvjl3)RR{J-QLzAl`%A8~!X@8emgfjQp7_gpouXj5aIT-9}P-dlG2 zKsQxs{{eK1mQ697`LT*B!v9Kz@;)bJ{&Z(`?>m|%ACOTz_4Ke?*mP)h%4_Lz3P5se z@Z<Y{8E2C6__FzmCw`GtqIpD20|0@!i_!51ILqR+vk`lz0t*zvxhqSzBK{R-O zVIQo?;wW%hMe{wULjlyzA%9d}$R?82uI>niSyAl3OczCTbTa4M-q_|@6M~Bvx)*NKKpWgBg0_9bVcjJ&d zIJ+{D9!FTd^c%|(3!@$$WWFF8C6{UoWLn%K^g^K%Z=otkyI zS@D{H)8)n#TEir#gSV(rnm~yit(*+Ro3LR_|HzdA5p6o`!9U}Mj^2#wfl*?b%;uoF z!V}|KmLCE`vp|@}>TdbIaY*;eeno?;Il$%Nzg0H`BD!ms3#WyZ@*&|>Qy3&J%5`6% z;nE+ImApEyY{f*fEbLqNTK5`v4FsU6M$s@0y2H|ZM6TJ2QOFSW{7d{>wIgUKD^%ro zAEB#L6~$|a@y*8=W|N#en@t%~@?k^ZQI-Xj<_TdQS}bs!e+MX7?rxFi7>lzpisvLw zoJz`-wZLMA)bjM&e(v|;xNoN$%Kwm#rEr0fPD(4X@SJQA)$%26YBq%Yh}V^hnU_Wi zb+{SpV``+TT`3daTP7TIMryNmEI1D^?AcEO3u{%?bG90K^Z{uLredEAIX1K-j-iL^ zD)+Gf8liL!gE#mg-WiO10_gq|JsEM0QnNmJ(a*Gp%7(jcn!#dPGj*h&`*WoW;N_x7 z@25&|J0PfkeB|qfr#0WX7giu$9+hsfwYTkr<6%jEJiWDcy~qp&xkN%SF+eK`n4L;SmtO~f8cRN+gP2w35|8-{NW>2U9D{WHz#5q$ z-xJRraC6jnJaeH{79fiu9LNpMIq&kLCmv=|en}VcgDN;`5KfCgbm72O8e`jN;c$V! 
z(lS`NN z1|_Rw{671!b`6TGVQw;BTq@=B(>B|4lxjT*qW_6JP;G-6F$!jd)4!#Lai^@mAQETx z2qkZ;4YG!cpf&}RoMwHOJv&v`+2Rv3mz`qszSL{pFevhq9?$72oT;92G)YKK*q8!Y zQt|JIFUOTCjn#@^+d$7;i%&X1ffhA}Eo#GX6{i5Wp;}aarD?!tb_s|}MyWGgFyTHg zt?8osLc_}_c4|Ra?%dz5r)F0#u=%8+MdcxA+4mplEqt4KFw5m`1hsY!-&d zT;fryaX3~amCl*)P4PyBCc<_pW5^YEH8{L0S607`B5JVE+meEzD)MlrNg6dh+=Z0AMDM`1GcrjKp6_|QQ;@w;eI z#fmd?9F;NBRhUcS@;;y7984^0w%y#N|^n{a?OJd1ALutn5!l zq1fuGyV*(b1~Iozq;woc9|0y`m z?JA)`i7vaZb#ObU6J+uc(yA^wZtzE7n&1{>^W)Z}+|~yQG(%T#qo0_5Bm7nxf4u)% zLKYCegCl%-28Tl==EnZ}sB~kk9X}a$$I9#KzvhZ$JZZAeIh-k;xZHwzi*WiG?~Y~X z!_XeMKNIp)<$S~A6)VzWhshAd97$Z7#$}9za0vL+92*2}=~Q1_rN0-h>$-#-_u3{{ z;-w5*Y>(9&DN3)uq`OK^xye;0dRe&e5REoz*R?vBf+|vRwe8J0D*H2(&s*Vb;LQK` z3V#9%p|gYO+ner%(}xAWFQHxa0ZfxQN$O6JivgL?CL(SSbH*=c1du#a$VYsi*S7Nc zW;0>qu=`w5LNAEjQzM!Tc@#C)IXTW@Jdx)V4w%MFLs8w`)kJ z$j-#?JH$3vHGVkgS49Br_7nhr;2TOZ3^Fq}qX1-KV_#&C{hp)$UG}9d6+9?Kj?La2pLMCC2;6&7D-DKbfHDmlTWBu?cjO2o=O?073jV+$k+V3 zc)NO*$~F@w&j-~mb@d3^rQXlm8#d-Ra+`RmP)+lO;*7y;<`FW@fw~TNvpar=-N+^X zA|7@R~Fl&FfpY`Z4=#SS>}L+>y0;el7`bN-kYP+7)_nJld~;lhicvyC)d zIX7`t1&{!D+*{Ae4oHt*_k8K9Zk*Ew;JyWoaAZ+?5`4PMq5-yJ|GG78h8*UcYn;ap zS8{9q{nj1#2!+^=#1*P=u3=hUcdJGcp~3ZvF?D_c?7)#Ix>5tF+C zy3!pOQxpm}Rvs%g*jsDNtH&t&tDpTgDR!}J7Nvg@xMP6a#6F5LIo*PhQdV@k3lK;| zV6GUVK*IhA-1*@l;PXi3FVvR)X~F_6wFW6GR(&KjtxkoCJemtBrTnx&wb*c&u>!ES z&&qrsNePk>0o3fa4jRXOh$VrANE2(NmFxhB^}#1UZtBJZRB)T0DYSYV3G!`3r!%pX#p3}C{S+8Or z4Zp>S))->7GFyKep0)3Cg*14za5IaYbMAQ&LgU%^K-C?mwM=Zr)`W^8Ko31lesMdc z&}@eZ@tU44yFDp{U4n>KrZTe+^PuZ|ki8(sL6(TTrZo{u8^E}a_+o%bT&3>W*CP6Y z!nXw%u)wBTOWOc^P+Ijg@64*Acoj=yk@?4)BP0pqBAgMTDR%l(^!f!WO&N*P_AQfr|GQHUi_bNa83bbQ{RQ_j-~$)& z3rsrHNng`IE-=|YJaxv@Ch21|n=)$>y+Wxz6KN(sGV3+6wo1HhI|>xuJ7@w8@87*m z1gDe^f1X|RzE6w7+K>>^kcLY2K>;fW>LtcVP=l-xO?%u-==lu8+_>ZxlM8>ODT-^_*-xAa$Unu#)5^TYfyHQ8GOS5v1G zztAbaK+x)>FuIqYZ|r;?=~7$jAaih!$#%YTeG}8gvxY&72&9}GmfFDCh=-TknnSAL zejknQ|0@GU{63&QNyq}=A0Wn;KMrvsAwj4X_Dg_Kw5eTTXdYraI#-i;Y6T#+xh=*=>D70DO*PIj)fLT~W*el3A(Em`)c_I9}Wcd5&?;;q=ieIlc%4DTt-CFfbLaoyr? 
z83;p<-~0?4%KvV@4S%3;+`>7G32?&N&pWTFu_4VpU>xiSFQiCe82vcMkHrH7OxS6H z7ndAr&~R22lVpIzdYM}hg6{GvJiqg#dGvOX!7tmvWSLFwa?w;TgSg|m_Q+dF!O6U) zl#qO&qp3zB#Vfpa*7{U%nkjClEzXrC8R8H!U)$t?i07Z1S_|BYcB1;Y+F)V2>x}rm zw6R{r42Zn*zis!aN#C=V9SH2~TA`mf0lfF9-c!x(SyoJCf@Qb>c)H^1rj8DAHKQQ} z(ZjHZ>nK9KP-J~>AVJu2a_@^D)7R@{jK~xy8*+Ul%cZ-Bq35kIP~xl|BJTar4?W9( zG~uj2BQ4vhzY^^s>@^WBLODIvzaR$K#N;BEMGq8437Re-yE{8RC%SosUa<|^X1Jdl zsK9<^4=6b3@;1TJLu-c{C@=Ab#}o>B`X=N4ZO?5=Yw7NKCI80If>i`V_vEK6E}_&9 z3Zao!3b3-W6atni#!@H}5?LHp->zt=-Pu6gK$xo8#H8UgxLBpa%(k%*UlgT4pJ3FGu;##40`D;vPzCKnC*%c@211eLsZo!!Jj{8w*pM zvUPXLt7;&-Bv&9aHNl)!AKFogO(Yb(DCP>grJq*U$6dtHYl$Y*o+*I*0T*#lLsmQC zxncH!1s?%qkq**h9B*hiSFetm2zwl4kQhR$V|4pw`zq$k$8et+8i;Yy=NE-Zkhem^ ztyUx&`g}TpI0FV3+qkjKQ@e1M$VLS|L+!E8O23TL#qF73>}!DM50bz~A~Zdyhml~n z-g^%zBdq9x1f1pPF|%ti@W_o-N3t;QabN`kBj@x4k%OJL5}qgRAskQ&#&hx->2EP@F>-uR+`k5C?@BAksGPLtV#|6<_ad(KM`_J)4RY zR`iqDCc`u{oJQNC`MjcohVc%;$}PjaVIR8IUPW{zzQWayvQ0zPKuo1A5*^`gh0j8@ zpMAryuy9qx#7j>Bgei0a>>DHYdq@vn5a2={(!XJOsTi|1%7RXfpl4Qd#_-U>^qU5n zWm&t#NLeA=(if3^<(%|oAG`#aK{$E7?Y70=_2GO-@cB9Am8oWYPFzys%S;B{S$ItQ zcPW`1Aw?k+>4tr){v1!}r7@Lf&>e^N(2JOk=LGMT&+t+$N5jX6XbJ;l8!kP% z5P`NH(kL8^>ICxhxO})nAzUQHZH24$O60rB;a!b|>U;$(67cv^ejtLo49}x9&CdiT|PTE%+wbu$WXU_ z7(pK5$SBY07dsVOICOvJ=GTm^rY`fRa1E=fEa*Y$2V9|ah6P62L#uDhJ(~aFAYD7# zead6RKYrn>lj$OYWT$b29<4Nn1?GY<7sQiu_POoNUVv$j-_VOfWTT=ve$O+aL)c%~ zfug?=2obSX*E+Ceb)aZdel#mZQq+UH0l(%Zhp1ngTm1J2UdM%l)aK{A6(D%LdvhV)?3t^fQI#DFgg)&mp=)=N( z9>Om-_|RSNc$Ge!d4Q9H9|`8(D=qRVukdJtgD9NxUhNbP%0J(4l}8l^hRp~FLcLFSDT7J_xtkE*3InI#GP}{3YBZ% z<#Ww9gEk||Ik*ciD3vP?8G_u7=o_Q{Y_9*rP;hzDUIq0=&gjLX_KvMgm>MEMxml@aiQM~iBA9O7yBK%w2(RDd}s z20Cl8jMp5F>{CVq+grPr%T#zRTMuN!FA65hOlZid7?CU!8HQ8@Fcx@`5(qax#Y8gr zQYkAK_b8=2i>=?Qb4xksX@0{v2OiOszepSqe6`9W^g(n0>gUt6-%shXikpqb3Qc31NqJGtFZi zx{Q4UI){sAXf2O#vdw%rE5t5-;JgPtULxfH1Ii<~c0%V?;kz$Vl*mB#kWeZ#*qad~ zz(mhhA95~iU8t9YB_3D2a&bf=8yim(Umyadef>ic(AT;OfgY3OwbO@U>rk!^mao1n z-(^N);+M6`Ny}LjS|eZHW0Xs8sxFl;vsJfUHWQ6F=|{o&A@>wyVev;XH2Om9@F-xE 
zs2C-N;lvFE3rGxscYl~RAh{FJf2f~Ja=&G?k!WH=$693cV!{uS3q+StoMZRoIL2W9 zH)!Bdtu|A9wih@L1OC&!y@&GOn-sc=o953bSCvXg)H#caiL7g%YkkLS!*a>i9PEk( zh#n@@^jx0{ke5s%gi-ool4~Od_!vTK1RB%-dI|`Z{1#I2-^Yf*Yr?I(L`wTpVCwu2 zPV0VCuk*|%vEJqu%Y%oZFV=|?XT%MoeFpTQXk`Sfc+ck0IC;vGy+ljc!kU-jbJ^5; zqv_DK+|PhVSMC1))GlNXJTnCdbRKGTy-YY$tyF-AH=knBIqs!owrw zRCrFyOUf9tOBY6g>5*i~;E;kjJB%b+^Xk#a(%|b_IYhH}Vp#WDG%(`DY9H8_Jg+

gdjDLpNl3DP!?Z(7fug|^o6n+6wx`{OH(>mGw#qf>v2#b{%kFhBBBU{w{XYrr@>+Fe#D~lG7l_>}7_!B0CbQzzd1X!! zegU4i8u#e8>0k(M;Ua{dR;~*@uH5~N6HA2cVvUkNN`gQ(cD3`bq*@tG>0T8sawST! zP8RXjDzI=CNp*JTO)1h!s9`N8hDvQS6_PETRtl&Vg4)PFq0;46C|6WGbT~j<0Cb+F z6NTTc#C5obuR9$kwm#1Bh3f{&6|{~n$dEHs(mzLw>K1P{&>Pld|9m7g!;Smq0JJIL zC+zrib;*}^tu1laX;ha8gYfZTaDa=h`}HCrpXkFE70SfuD=;D}1}FZMvswDpe-$6j zmqo74s()SxoCkLkb(#SvLbbpvp|m{I@vD%yzB5oPcS#-#7-l9i{x{;u%>EzXb-4!k z2>GUm#U&=Z9tXLcsb7JFxy(39-kiAjK%BpC)gFVLB7gcwh_z%AXpkUiUyQbU4iF5` zON(7bLaS)IWmYw~W*9Z)(&KGCWFcW}FGQ8`sMxYxUZE&=pWfs51QeB0x9E_2(oVMy z8d6aO*}J;{+KjyJWl2S0nkG^e5W|?&3T6o`IIDuV1ebwIZ5WUAxXzQ$`RMqaHtwbKB$Ou@ zOPGf?R5WBi-w@sKaXuR>Xu|r*HDA+qsjIBn@7lI0oV!SvS&JQ#T^(n+pz0XZ*GR8o z#~chHbi1Gp;%{d;(Oi<{FEFB^P~4wBc$48qNiY_Zm{qgrQONP3EOmQV9ebS0wGFbEL z_PqPlKk%=Oxlo9A&<0da-Zd&y8gJYDnV-&czj30q6&Bp>?J7Ix`MgKc&(X;IDkM)x zIkiiM@In%CQ{`o9k>;drZGy>l2W0YQH>P%e*vLCGC9mOJHWh}BRK`22=NR(_Dm`Ge zTgiJ(rn59W6+X?p$FVdN!zjz?$$LM@WE;e4%W&JY_H-TOO@~CsD~k&qH{E(($0YY{ zJ}yqI>jV(e&TUL!})u0ovp6Wxh7+Fz#u$|wTZ zE=&pHHoiuKXt6`1MYK(S_yF}e+5PDk7&fE+7C%;qbvcWn{aTs|u=ZM#U)ua8k|Oau z&Y3rbZ$@$qAG13888f;#mk9YK&~jV&qcI^=xr1Y9uK2jn1*c4KE^Jl4Yog|``>%x+ zmFs(L&#IEIB_H(uU`%x;cpqNCwdmjXydhQzA?JX%Nr<=`WI`{V-ZDg zA7T?DG6Us~MgM%a06##$zqPm8oE4O?%gdjMoqn6c(%8veHM1drK} zP)y4G8lAbxt+lJc_c$Y+(F(2vMVK!h`0~S?Fz$vZ0IfVQ5-*Xru9FRBw@XSqR)_ps zq3#-h-GW+V;k5%sQQZmIWf~+kx1kF&bOY>Qnp%U3!}{cG^`}43aTlF&^j)%FvGWDN zY+E=$xRu)6I5%f>UeV26$K{r6v}Yy+8|z333u>15R^-~~g<|e=Te^l~nifnEQZ>>3 zWNb)A1ATqQ8Hoy5d{#K8?0Zq&JHybRb`J7U$;OIG!B1)GQs9M0wI$T;7-PyAeAcQf z!RjLf;XvuT1?qn9tICxHeHe3QfS=5vqq7=A)0CH$DDD*9GQL&0JW@PUX<&7DN82-i zKYv@lNtJ(U22NLH&IVPV%_wn%M)aw*maUp_FQC8;b)_i%wbI1I95648nXAf_ki*G} zbE0UUYH2l!jM4Tbwp$bcau+mDcM=v8TD3q37_3IW8f55rs6PcHrPY`Sid;wRTuOr@ zjVgq9WmP$}nie8Z_TJNV&}=G7rOq2OaD5j}6D9-{1V728kt4#5_S!}FJDZ#z;VuyN z6zZ_$%_FXt(Mm3>lXaE#td#6^T=0q2FoJ?cIh$m#8cNan%oWqAhHdp&gap{;RJK9|BSf+Tt`&qbZ!44il` zDM_%T?~k#}LxYA~fMa#l!}S+`a3giQ`A7y*`ndkRd@;J#<8#x1EXNeZl|mmL!l-sj zP??h# 
z@jUsaV%t3?I^p;hPuYeJ1VilEq?!qk7&P{R04V92;$yXe1tL}e5}MKC5jTpoX*oa} z(_k+wHn^f4EXXQQH?W7qj48RoK3JhmS}KgUxoV!HtOP?R}@BwA5ygSgcrc14<3 zu>|B$KH)vAF3}uYBj@@`+y1P-Pm717rMGk`&RaNXVL&K54HLwT74#nM)upN*>J@Um zU$;C9_JFPfbxQN1FECTiz9X&XVhks>;2;Jp$WF4r+!>8wKZ4fA2tUFvE=qz8E;OSZ zOz8sR;lek;TNC^B9-61O#<{EI5~>K5?~@eZWYckKc$6Xq3yyTrXlmWmdeI2HZ{h+E zP3I%kOc@UgFoZ)zO`;FoGTL9dl{^xGO*qqSz(48}%#p-;yK{e5)y&O}D}kH;<*9wl z&e4MW<7X|vZQnS<^93BdZs5wH`f$DLL5G7f(uU>kRblBzNf*dG%qLuJq36RFA}Y9O zq}})4Sa_m&&(5aB#qI-rkj9m&{-L9Pcxf(nvm~NpbD00IO#HRmS7jSm(MRYY9>F}M z8jP>1^XTfA z1`oa0r!0b%6Qr{oR@c^<{7$xdaweplKmF7jQJ5mE`zzs{F1GjX75G@@nxdV8NMecC ziun;_Op^<8o1;fa>%}i&jEiGEK!1QlVaV3NoYLR=1yW|HL#ukY3mvm-=>K zxklII*5ngdI)T%(pel9@`{a_kv}d06XHTK-OQ9B%3TW0<*0^F9S~F?Zb&0D==Ymvyq4 zI@;A3D;aqDeCc@2DdpGX1U$a4Sf#yK_aU}RZ7h}NA5T)UwdzdEg0Uv`4zlp)VeXDIP3oS^V@~WalUa$h9g=t1vf&IgTIV~nUQtU=LzdcKm$7QQF5+X=y?-xVzm`c@s55b zTF#y*P7?Q*P!1v4F&$!`)$Bpch(XXfra%vDp(V0?!$lyrZ7-3IVazaUR?5rT1`DWI z^kQ+{sA$}1xjeY)emBPo*&!1*4A{W>;ctAV>?`X>=u9D8jmj>8) zQxcBCG-ko90J_D`I_fl~LJ6iqHMh%o3Dr=2Cbmi)Os_)%NjsAq*u&4A?(D?ff`j^W z7ltYg+(opdrVUHjt^p#zY5LQ+zC#q0WI>};7n({RX-Hw$?t>IT)+_EQCTkH%= zD5m^vG!iFNSak3jx8YwNNTGk$vNo)AaHc{}2!nsm z2$l(AeXZuMc+M|oTOz=g_*i+7@e>auH+E~T{vID09Q+4feA{}Iq{FW{Rrf^#Cx*&l zp+~>Yg(6ty({2-M+eE@BIx8ZAwp_4!45`v0oj>BrsTNo{C%216T(B!HinWwedUyuh z+*y6SU&Yw*qkc1YL@w8ebXVbj=ZMA`Tw$D4q9>fY~S>9;9Rk|a@f?YW@k_;NdX_mmo{#<6c(22-=3 zz5Td|f2cQwsS#&9*>MTk#kkqJ(j8_|4;Z|Vu<Ag5kg)3$-<0)@7vUYHQDbOVC%N#j&4Q6Viu~GDRE!fSVw9Ll+hk#zjuF)56UZHz0oHozluWp2gSP3%M0VkqCt8M6ywcGwC&z%JC$1$=HGgitsJ5`(pN4TE?ef% zx6A(0@+~B9wA-)g+XqtD8{^_vDf3mc8f&{P>8Z*c~ z(z(OHcU-J`x|zFi^i}INIe1} z9WUMhkFz`k>(vEUC!|^Cf$n28uMTMgPJXN2}2d$oN7@Q79{a3z_Ck@1g_Kn6hD0;L?3S+=2Q zXi`0{Sr#c_LY$MBo3(ede?R}(-!xObkgter8CV;|D_oxu&iCv}`68iVi0klK_ddy9 zh^oGI-!|GX_xn9hH8$8sKzeQ_F-cv=V{5T42pN+Cjnh zi8A`cB*9eAH{kQh1d-iuh&>ytOyYu%vM`r2v@AEof$mz4dzDvAR0@I5zZIy zS;PuWtrZmqLBxdK)BI16{fF>F?Gf@X+d(v9h@a*k}EmuDW3${6qa zjw&#@D2`dUn(Dg++k_#FEw6#6IC;h#okf+W{GwktVgTdoF1RaOJQttaf%~0~B9tYO 
z0T={2n2m?spU5=Bvsd_9Uoi`OKsX1KZ8RthcxoDgL3uU$k^4Nk?6nUK%-d{fv=)-{ zoNyh^pfs690Iz}$Cc+GcLP{%qK=b{7)~FV|D=cFjajpe(j#!t_8e+#F6$wp_BZ8(O zkm!g0_#D%t@fe>I)!0q9a+x;aj)gWcKj=J}Sy%0&u)X0o0pJ?85HR%^K4X#6XeiS{5E^Q1^(I-Olx0+ z!9?rysVf77gYhrE+^!A|_l?tVKh#stth+Z0sJ(}}E-~mDa!-q=1Fz$3mX@j6(uoWb zg7#g+Q2@`55An2Aw8mR%w`4mlpM&8L|J(OG%3g%p9Q&wRsOiJ|wg+mkMz8kVC%+yTfjoEfmGr zg+v7Q>3l^(Z13B$s1Aha5QH(gRnvr7pZ67HAcPRr)&5CfD8Ap${rqR#yaRqEAXUg$ zHkkxFLoW$>>xXzvW`5X8F{}*1sSqfLS}Jd}d+u9fiV!a4h;~ zPF{{*gxnzSyWOXO6D>Sv|E+%7f6b`auVULb0nayZKkEURY_ahU+flo`5+R`H8}Toz zF?YZq$@E?$hU*qE%lCgCSp09vp+DkFQ&81gwgUGTucuW$$DAJ&QcQtU4@B&U#@CRP zMgOCe+?b81zrsLwaE}z|H_LW7uXH)sV{ZXJfE1KA15bsr$E%scXfaa7G89P#UX!Vx zE=moI&s-N+*b*c5_BVn%3Ss5n{MeIW?+CGP?8qb8j^R31Yg4nn3Mh%zzsTEYyJDdy9nt@5GC8z)K_Fv72LnvF(*Xq?d*Ht z75d-tW=*Y){HUXNy@X_8VPWDP>bd_9`5G30>$pLjj96c(Y8`7hD|UsZ_7yuzPb|E@ z*;lyYR#$yL%Ei`M<{%2^stFsYssqHW^P^#~Kt}bK2~>CXC1a zyENN70g1Q^S5EDmE6r1OnL%SBfRltig#j}+KNe>$!B;FHkK7Xb0Y7)2*o@IPcjT+5 zS-=!v@wrZlUc7b6N&kq)(AAwUyNaDHDT46zuY&6IAFC%?VE)EqIqp^r>L4gYP0sA6 zj}}pdsoFx0Kmc`geCv(cO0N8|D*{w`FqXEjA{R7w5~uNsp93FqXk!!;@&^_dB)Jy! zV2q&@a|h@@HbYTd^e_QcMrxG5GjkBcBLn>NsXZdE(a{MTjA+v&P(I;(t z1VOnFg;S{b&_oMU5(zYDMdjl}H}y}T7OG?e`R;1T-&n+l4YDO_S=YG zVP~M9#)vCaEi?r%nNj7r^#}O9hnr}>G(=YU;Ag)#2>9Mzak$b}7jAjPfY<%5)i?GF z84!JQsTBZP#}AjTz;X$c{|dq#DwoDz73#a7!)o&E8u@%uZvc)0A+<~gM0RsO&WDT! 
zFl_{_`N43UyfQ;x`i<22L*p_$*NxjvZZEuyA}`A5VwCyUciRF7P@ul`rY6Yq6cw>q zyuH^8{K`J5gr9iA8UzR^?mQVuvwc2ulOSHGd-bH*+!})r-C%8?;RjBdWY$rfE|YT# z(e5$vh%(dp`&7;NlP*CPRB~@6IirpGLts~=)<8=Uu5c!s2jdi1JE)JZP;?G>WctpI z*kQ;*PjjHfhj-nwHqnLPK-bd)iAyv+gcN-l;8e-LilU<%q7t{3&@oGJ%S-}I(HThz z`%Dvnx2mAmI=6Ex{RjK1_@BW zuX3^jyj&A8>Pd0Y_TlDO0Y+VcVjOQFP9DdSeG=Y$YKqg+n9M0m{ zaRn5Z4~k!6E&jfeh0r^ltvINy=clOztW5|3e1;}Q*gDuwk~M%`HRH0W#LD$kSL8o% zuhEU~RGOH$)$@XokJ5N}W@8N?{anIqYnmidc5KTX>#qRtw%ne(le9Q=+d;656IP!E zd+XpV8(D*6)pXG>TWVH>Zc;sh`!P8*#572shYMw?-(9V$ALyInQ}HgY8cXluj~98Y zRAlI;crtRdkj`^yv0JwXh7~fX^IJ`XDT3W8?2|n96_zwO(A)E&FVH?Wl)@zty@j{< z?rpqcyGyHO33%A{Qx8i1q+e!*_?+zVYwQsbW>x^qUh!~}-0_fG5qPfSuUIGkH?GUJ*J$14*jDv(Z_LINO1N6%7TuTKIu~+ygT8rdmf1nA)_2vPZRekK9o>Gz0>khvExU1=`L} zan5+ReC3KW#(LYoWZGAA(HnkV^yun8)sidm-N9!z+JdyrdKKi(qUjURGXF2TG~-%Y zJqy|N)-U_bdfVn<`qc4_cH}+jWdw7<$ZS8@#F(t;VP@z z!B~JP+gx*iJCcq)BZMgovBECchUTz2MFLDXf|{A=3*JyHzS4(VZ2 z&)|Q^oh^xev6`q$ttFd3P@a^}fjp+alp}Eq;Z2N2F+mMSp$1zU_?Py60oFw8+D-k$ zvyd~XEhd&R(M#&iqs(8$psbH8Fz~axkmyB4Myfauztwa0(P03=#ZkS81chOp(CIo{ zgq2*6;$vTT!Yd>)X)A@J70xAyY&ZOa<3LMnDgOhp4~6T*yL)S@R$2VZVfJ9ZJ;YvF(1grxF zA)srn&luu>Ia(}|y_ys2HNpljHL27TlgnWETKGW93wPUdzyw;Uh{%esVt{UVE%ttg z+%M&qMd?!U!IY{n-D0y+{Kswk|0QUxuO=aon>MFk<*-Cp(#^6kG!X5aMyf3LO!G z;Lq=Rh(#$L=KwzbT7k2QLa2r=Vkb+5-;=~?Dzl*$bV5so?k%1A>2SldBe~mp{(($sSMR$ZDEi0#qP;8e@L&_H#x&@Pz)$*C6p!T#4-C1FbkGn}OQuELBNBAVMh;FL z)p#X|vPV-J$7ibpy85}LBB5L)jPvL{0h^acAW$ZS>7wY!a}1T;ei9xANbC#dzA`E5 zI^Np&5>J4D$e*-Qi^yJMUXZK3!0Mu|30&kkhDi)Q?Lmtlg?Y#XsranfTG70pWNd7= zPk9HmWL1}m0ymvc$VsSxpXjjkk#v!s(rOGn;D_E6!;#`XEQw%t&(v_)qCp`+y|cG^ z(x?2pVC@b(gfx!8_37<8*C|i^k;IhrzH=RedlQ+$S zN-FxjeNb1FCXshv4Y@&Ii@|SVhcg&bM6=(9A;SyevD$CSK9{#Zt^X&?rSDamG{Onu z)w!M;N_A~U1i%GYbsSvb=E5hAvz7WgE<$(T2O^FS?nl~DjrVD6--1E1nj3suc8~*V zYT#^-HF^#rSR)y(YQVgU#Xvz(Qq5;cI+PL$oWb&Z+6ejSrO^_SxE=DKy9D1AkLiyD z@!3EXmDD)Q2E4v8A^IH#Q6F_^K6p#@s7%t9FuK5pMwi2yP_bee8d@KxVkKqR``a4V zUIR^W2{jbbVqgo;kEQ4<6@7QOx33(f{(g^XoKPnl)U1YAPAO$F;Sfm(H^YI6e(XGT 
zkaG&8z`seI3Z$rwNnic@v}3K?sAM5Z+@VZZz4L$O`|?)z%y)9!Wfg2$Ehs{^AWamT zlAGTs-ck$_lLazrS>rO9G{2k`;UrXcC2t*I;`sEgN^}mrzNmy)1A2I+I~m5?;i5Z+T*(hVtM$Wb#-~WyePDKs=_E$&9RQ=!Z4pVV@f)PVcL(s3K}QA^#kQV z%$Uwv7%>*s*bhs1=gp|=xJh8v#M{NYby?6~mR{riL!MZkqV zy&n$`?aW)6+%DS^i-jFT>B;XBmqO-Y6VdPi%UHhb3LF!piz?9Oqq@aMH}64`eL{x$ zTBDlMK+sZekO|ul(s2GZyOc;eT=e`J8H7zsDJ~0QD zTn1Hf@bzWw5qeQ+tKU_bGy13#Or_=@Y8Y-HNWEs5dSD4vjw&d}kI0_~9bu~u_H*K8 z!IcE_l6zNrgLL#8ruV+hW~w2-S4BP3e0=;1laQN&c*;flwY4-dSVZ7cHVAJVarcF| z|9Sgm%!}_6m@ZhM3ig=9s-EVK1bBK~rBk;PIB#TTva9Q{3>LHNtL0Z@b1)0L7Ti8l zbG81I2OXP@M3Bh@QYAY%U)y7u+O*dng%F=^ix3+^J~TGDnrNA01n02rUKH0*g^haU zUNW{UE9$(39wGRJVWs?|1&|lnq{lakk3bNDr`nQv>l1u-B%Dc<77?2CFXVA`0huDc#f=nAl=U9?T zIfHs}!N3|sOs+Y2oWj#zTL6SxZTi@Wz2;1sstaq!u|+0oH!g3~y;YuSNuIHI2BKC%5=?H(eZ9WIU1AX7!-q1^?hkoUqKhS134~@Gy#xM9nMD z;@!EW&N~F`%s)a8&F;k-b!xkeG3gTzp#EKfhqjKL+RCF-Ee=5{{$pB*mGQ}D>Fc2@T9f`1Iy91x;P+2gB(#KP@?fmuR z8`wql8a6tw1_RyGrMHus-C?zk^-IssX;W-?AW}I1xaEeamSh$V@MlC;84p+iV;1M5 zp);_yZpa8wWyavV9_rDl65w~4pVj1pz8M@&9)Z1Ov6Dp2HA*=}y-Wv)^{OM_L=I!> z6wk+`HU$nwudRtlS5Lz*Ny^Jw9?Kn5E7` zrCtDNZ|-=ErM)Xv(X^cs`wztUEy~n*Y&+3fmX0kUf|z1UlIe+dH5_jy(lD)Lx5Ig0 zzk&A)5}oi7m*tly*&%kWZ+@Ws0p5{S+C7};yAa?5(=q_0WGp}gkbX`}sfV5=kNrExZgzwZS(sP6=k{UMx0n12YB@k1cQJgbYr-qJgY$@|5Mu9s$a0tM~`BtmR zMvx_rCyVitE{sP$W~IqoL!OGfv4~+}sFXn6_dWZC^h$&ums|vk0pqm2wi?sKu#)A? 
zw0D^*L+J{w#V%^O!W-r@5Chm&rk@jqrh^;0$2m>8=Ey3{^sjOH zl6)q^`==^Vv6-W*1qW&xn~_1}y|d(DG_dNlm#XpWo{qA$ zQ24mNdyrNZ;=DI*;Z`6d0Mqa6JXaw^pS`*y2ZlsoDX%3-c#adY6>p{+#P{!tlvTOb zIJ*3bn%#7XVyos{UO1*V6}s3Gxo@L$j0+^k&WRyu4;^6cD|y*-*g18I9(!f8AIQu^ zzzpF_svyIjvUFg`OwQb0R~=f7A)00nrIBxvV5wmdnU*rPiGd>{hXprXRvG|bJW=pW*F(d}`RzCQz zMHaE{kRzJ}qm1s^585#gJg-Ba+OzP0kP`zb%_&QO;38#?cS&crCoZ~$1lW4HCyLbV z8}0+N^PRcY)7+k&d(Hg0_DN(V@h=t$VR1`j3Df*JN?U{t4O}eE%3Ir+yawpw2?P%q zE#xw*5HUbMtKJ1;1O6X&m&gb|D!3Ol)s*O1o@>SXtxVWg(2hOH2CuGJ_0MHf5QB^8 zLc8qn5UZ-M8C2lJT#TQt$Q3Yu?*zD3s4Cj#(|}!^=aGTOk5?R|fQMI9u)LIX{B7@@ zFcGp>4OYng5E-)0PrJjaf~WPBg()@m^jdVhN_Wv$qH3Spj6R`NdWm8c@k|E1n$`14 z<1nXQ^Nc8bbcNW7swZ%cheFi!nP$KDGJby-`4(O4AneMIEq{Hl6a z^eo8S(dd0#581g;%SS(g7ZdiMBR<>18A`3JnufltD#sMpUrZd`i0md zqH%_6h)qT+=wi?G!+F9fRpIswAjm0|r_APVWdyL01I|~*wxPI>-Q2e#88z$qOhls! zING(B3ZeVeoo90ITYbBt;x%)omW$>y}TnQ9U911|fZ zxn(n;NbF+k3AV#gDCzzx&r^!47~ileohPTEFws-fo<=9Qg_S11qczP<8P*aYoYUWQ z*pgHcH30!yM#p}mgTB|(A%T;Hf^{g|*F52m166E1{bGT8fC9lI>0KS~rk>z*?4?+8 zkh6aoR}y5^HhJ}yXwia&@5qS&LZm1D{5fHLqUi24<(ZS&kOfrAnw>$tlR%9x(QTXpf#-B`Kavv z?!Ag#Li7^F4^#uk|7j9@3tkWNy-(0D*BYEndTv9#t7pEEWe=L zt~%T%1{ll~X=h*6NJxjGtw`g%BFM>~^L0yo>h&57NEaP#Iszi` zg$UEf9q%`t6O(rT+lL9J*oElL3%PDWQx5Scp~9AZ4{=38npt^4MtJ4^ci7HkD@tam zcgZC}LNn7nTa2arP;8BOPWF=ah?)~EOY&c)jQy5ZzJ1?CFm;La`6VrJPPsiJ${4PG zQ&l;e+gRDD4?C?yZJ2BTLU%AJo5Is||0`pUrr0=s*SeC%nh0Gc= zN(Wa$fTlXvao4A>E)85F&|KW?h&Vag7zu(e@dQ7egnReErtHY$JBMll0v$rP7cJ!q z2*q+i?t2DY!2f%tl(HzLae%n*L_?VOvruH+uqXuy*oulgGolvv-I5$9>e2cq`cQDE7&8ME4x3o7iU zGi7aDr%va>BY_Bqgp6GAaq7UHMhrw7$wggIis-<=r3RGRuo_;3Z=k}kVv>N{SE(lE z>XaO6ULR;9#iv2iDXM#Y_%xm5n}LI0rfeOYbqt%8mwql0!J)c!u3js#o74kOh4F!} z@mzNZy(HyEqbJzB8z?G>n8|}tG8u@6p3Z38#*XU)z{+VyF5SH%&>hvWEB zeyAUOT$-^cynWEHzt1JPvOIAC_cMbGrkmfO9D$lpBkF`TPoqs$%5u*fERrreThqX41Ljnw311%W9 z2MG43EHoWWZWC@lwNZz8JB8)K{i>$nT2RMc-q^1XxrU!x2>?Vrl2h1-i>4C47IJ>c zAwOi3I&eP53z$g@;0Q)y%RWjdfx=^L#pKvz+)<6Q@p)ov)d?oU7iDWinr4~+p#>tR z+kXa{^8Ph6D7P&7I(s=G#++7iQ;iQKIMB0>KM$Q6;9I~&*;h_E><&>b*FYv>V3+)C 
zCdrVvXAz8*pb>>VX85BfA3#$qKAELROmPaZ)4?7LypbfYWVc~LN4#Qo-TFJDA3NCJ zf`-=O48}9;Fwb2Gu$5O(#+7}JOPoQ%51>5%h3rP6lJ_LQIcx|oKiAXj9mz{ihp8k$ zo7LrZ2s&-)Wt&Sl7BohV(OCGnD|x@g?3yYHyYg5AP6DEFM?e6=w&(oH(rPFQ%ML<> zu_v&+Yk%tKYBT&T8cLPaP2D^zp^+9(pMp;CRNzWLf z^v5{2D&t4@c@%Oj-EF=4V0sRA`2?1cQa((g-lS^v4G9>r07^FwRp;e9U0j7`zdc;- zIxc&N(A2Nli+73`T=YQYSAFSf`jlQZBqc&;paCRDeypNPIz2zy;zSsJ+tb1|yTkprXcv zu$*N{0vn3s{WQb+3}6w#LNQzbskY6Xa~e`P%OOp|0FYRnwHEEnukg1Q$ku#ye*f7LLg8?I!{m#b|br*y1ILa`IE3HK%YXAHbWtT@#P#?#h-1A&9;D`DZ*a zV(K2qa}ZQly^k|>WdkWnQ$(DF94M#g}NRL4r>+>&5q0q1d6Tb)Vr&eqez;RN-LvtveTYMz*HR zX;ww__$P_%to+*^C@hMK-{Nr)gtsbUX!g{iA_(wNA4<5=e$!C8Jo?b#A;)BE;VVha z%q_DpT&2M^eTABL>3k3R2m=+AD1XurBX{{4X)=(-a`G3=#;?+b*{+AJO9;?{UYlA= zG`1&O@@3_-QM~7QidPg(W`Iu>?B5B5x`rqHi#KZWvH+Y|l7HXKCTf{{x@Q3N~pFn>ghD!={plH|8c2TWCZ_b2JNUjjCIL z$zigx9RuZdv~#OF7Kooi+nT7)u3u2RTJ<2p0qTU+&`v^{-T%m#F10Nx45gw_kM#o| z-vGyBeu#ebfiA*HFQ=GusW_iRsgpZE$#SM=O8N(m&lDw=Ci~~yLPHM1VE3$kfxv73 zs}MrDx=MDAXN3=|PDzZXfsazCc{smOd?~QX*|f#)>(zBMPb3}&dz_(EEr561ynYqK z7s>th{YUR6l=oj?2_72UFU?nCZ)dC15^{eq8bd|y^sdZN4=5*;sAA! 
zEW>&l(|lfR%VF;#Z<;pdF)Z2DMW&hcp;&Zdfy=QIvVFE~nRCkF92qkz29erJLF;&Q z(j?W*o1Y|SCW^0C$BWj8Q3QW(;UBi<4;HW;T1_dB|%g5E+v z#P-;bZdpA;!6XL!Wc>fc2!ISaKovl-*A%H@3&~9}>}Jo6us-c`zFhZIA;Ea}-!uH% zZ{lNE?Y?b%RZ@B09;{Z-9OC9DNJtMsO2C(|ve}4(bcwLlRBC*|Vb}XqRKl7|X7@+@ z2LS^T8|{g4IG(TN(~!=Br@%$xF#}I7pcFi(Du5kA*zi_96E(8#WexEJ@7ch?j>F(c z+J)l?u=kD)dv)zAoy*W|SPuIKoi@O_w)GOa`pb=FDs{;Nm{+5m48yBgs@rLMC|2Y9 zb4#u%TOR1J4gOGEbX^x&juV!efu*g$O>n@v(cu4>QW`P%)umB_DJK4lu&3~+O_Tl(fc*nN%_S`hR*`?e# zhPztPhT?*5M#RQu{r!{&P|)f9uD+TJLb=*}W5S>Fs#L$|i*mjG2{p={%=z-Ut-YQ|~t7i^y~p z1MD%%c(H9-o4`PpL@jLwH%66nFNq4Sa+d`P-nZlYM2<5;z%ZzOwLfzmtQTU=?+9rM zsb!QJhn%Jz16wX+EHjfRx27d3RnUyTJP@F)%eijhV{5x8RV9~Q^bcV)dgqo@GA(31 zmhF>*Naz9IbvY|9K-v@ZL3~PfO1xXH-HBiwjSq?j(i4sn2ni~?!48&i*&Chzp(q}_ zzH<78qMtF@xoV!7Kkey{s_=7-DY3x}QwqQ0G0Ag+YgE_O+z&zaXWA3Tv=QQJm~p=U z2FpN|D%@w$`b_X}!S<4j6&mzblzf4z(b`k-iG0fC>@vJbQf71r)b|J%9W#>xYvHm^ zl{uIFQ6#fEs~Sb773tfG;-~Omm}y#MlXWocouM0JnJIdn=sPQM4Jj|}b)wc)@c}~s zqV{>|MA9Z=vGJZ%QmdlmVf2G840+37Xs}sr)s%|sR39dHerKTtX2jS2O4~^^F$a@5 zr%vL{NS7_g6pyiB5e|q>B0~h?#ggK}5^dn_u4|8|;zaTDDRHzD8H2Ay}iGk zd9g-TC4w*G$Sk7C$?;tE>Sw&N>#kg?hIeW{MmuiT-i6fmwURjK z;cxsVQJk_&Cn@x&w;I*UOyBIRnzRz>IJ9e|2k()RWR$5*JOE6BBRe6xVd;Vplp4Mn zJ}-%{5c?3spAreZ-JY(S9c&3hrmv5A2ZOTLnVF0yY83ep$QSeJU8Yr0VXEQz0>xA4 zO`hPFXBNzMIEFSm5CN7YQ}P7+E*aI|PUz~Rs|Ea{G`X3mV)vrrc^PgU4Xu0$imbb>yP_WeD#Y94p6z=_lR8aKE#Ql+u&UPsZ z9@jT#AN<>T zzPHw84VJ}8r-pJEyVPg9GjdwsQ)bbH9^QyY#@H`q^3}1fa*hkr3c#|Yh_kL%n0?7imMDpuua;JOLJx`|MFmOq+%?WT{QyC@m$Fj{tGw2*j!*W9=;ye#r zAE-XIzg=MaocTw5{QwMFnjv%V$jpmpLzhyyWwA&6<4oCje_K4s(6kxh)TjJXN?T40 zucu;hsWu!|O+ym~y(l}~263tNgXb%`rS+AP22G9&iVhhhiNmAIcAs;|V3#lsNx7g9 z`LhCgDU0`zh|c53;{+%A`W_*kzoEtS@oSwA$$0DoO*VC>4f#a~w(D#uGB+!xz<8{q zyJ%D2ng-cn{-_teAUFllqs$r6+7z4_?^eR?a^O(4gvTUC{Q0uxkWAW97~{(mkE|AP zwUveMCOQ+cz;)Nm&h(HR2u@JBd(!BbJ6~90G*{5di4)+qnnb4ET*d+FhBELI69d+x zV=N>W-4DmnEBeg(Btprcz{ncNK}raD3>c@8rjJ#2rHO@Zz2fFZysgcu1cvCb)Ft$9 z6~7Suz?CcNl1sYURh0~8+K#WsH)1SgOnI)R4Fr3wvKLFdUAmE7NZ3vv-q1?^7K`zo zLR}_rhJl$$bR 
z2lFvh8-AdZtU{Ti&^i)XL3V}-=dl4&ry|M|JuoaRLghPQNcHpN)%l`nx?WwxF<%;` zAKmWKJ)~$KaT{*G#_UPh{8-Id{QsaN$Ty#OWy*2LhETkyt|bwRLx85{<{T*JY=f^> zt=UkYqnXzT>L!qCr8m`k z&Dv48OTycI)r4_&hP>g758fR;?8I(tL>p^|k28NhGSdZ{cxSSB?>p#Vt8R1gROTRs5IYI43tbu$yHXgi;0bY0j;Vs01F@E;4Ulto#L&v6p6^>^I6()Dfq$klT9|qV z=f>3%vs2Yn2-_R_11ZIA)-m)FvzJ=6OLexcJLZE9(&GMt0i!FNfFg{d4dX91#*r%y z&dH=>1MCp&^`gA`$XCk$-zKgl8YSk$oEKfxt6&(~08K!$zmA5^xj2JY6X3j0SJ2g_=hf{zbYM2G?(2dU zF`v(_c=7A6x8X)*x#qHK+J#@~H3-2Ue?8G1>e}E5tFDUwaP7Zusx(cMmPf`-t~G*p z8N;?An0wL6hc5C%N+ST3bBeR38|bTlbcE^$CV;gU_2P@9hW>&vIS?{;-u8KIy`Z<}QTa*_6#KYdow0&Ny`Vv(>4` z-VJ(ff&Y?xd8_mVCyT>ytGuA!RHfh+lc^F~9F@l|2|e7sgZM`dkw`Mgmij6{F1RGX z|9-OIbnodp`M4kRISAEjaO>vMr7NaFv%dL>>pG;GaDKNDnyO&jwrPP`m7Rm!^OqMl z5Yji%os+M!GT}phE9<0`i5=y+J!z1=4-eVl24bbjSHi#^KL=N4+=IO7sgJFyws(#z zuAPdcq;98#0><1Yv0@VX%spo$?d<3xU)W1$$k@Y`$p&h*@oh&h5@itQT`2|A8dD0g z`2yEvzV0hq2vn~=b+MdUyg6jp2Mj{YWSkf@V;Kg7IaBRB{g#`bXX4ze8Iz&=gDR=K zg&1F2B}Q-9)&y$e1J-5`ZLcSf_~R`LUGcg|@R{mDJK;s~7w9+h@?*bROgQoZYM8c} z@W?&IiGf*$)~cmFB_57tn=`+f4b2?O+j7w>T)aXTVq%l_aM8;~F(mny?ZPwy-F%nX zTWPbu{`Duy@>qefpvmO-qHRrP%GzGb>Z4)^6@c=bREemN8Rmb@&IMqdLGusSx{7WMr=^p znceWiy}nm^PRI9Wqc8WB3&jDVU9VEFJ9aeR_eGN}b-lqVj)AC&$tf49WPQL>XZmu4 z!}E$YQEbz5RqL?;v>RZ3W?h@Q^j(1TK4+oIY$3j1;zyl7S#w#CArq?)2B|D%hA-8C zx_Wf=vNJE28Ydf&jtxH!3VsKMPOB*0A`c8?AY6cH{^&e0OF;;3d7_jhuHEkccQz`- z`=}KrBI1rK{E+25wcJ1eaQ0v$>Xz^puU@TpyH8ei!W!Pi3 zWi4KnxZIegv??o=LTtLWB7t50N%QEZ5#S}|`O-(1 z79e|*9gx|+5`T;7y)VrCaA~Na*L+|0Ily??H7AxPCT9|i-QD6bhG!odV#vhj`-`jd zplTv=aCb~TQIR&;jPS-D=NuK0d2@*m8a8R!w!R7cw`RB9Gn1_~7bP=24_BJ&-^)XIQ}!p04-+GXx-DJyvd2pm+Tntu zFpoldSCXCP7=Cgl+z|gMo&hHjO>|Vbe^bHcbx+{`OJh(23vSKlkl3Qd!WdM92xUG6fZ^^1+=X1B|q7?a2e$#6>(* zYo?6+epEeNGJdnuJrI&bV!B`+)4lnWOPO;aRl>6lM!D^3i2;QFhuQ`YU3ob6L%<5 zs}BERBgQ<|&X_)YgFRiC*056QMV>-&SC765F&}PHP`$ttq}l7^`#TOhlm0vnXH#-? 
zg&&hc5r2!WJ&;{bwbB>~@X8V3fk>V88PgngBuFW%F2N1EDLfy0;!2*58{Lgt$knKJ zmTv#*Q@&R*%h~-=R9%}l$<(PXQNC-DHZa=j6 zg#<9a++I(Gzavp=)dmyh@Rp|VNh16zIOFwu>NHC0r9cnC8FnD>d*VsIOjm2w!>!yd&r1&B4Z4xUaC4aiG4*u7j?R4pf4|R^ za-7xMQ?K==@HZ+LUwgG|WQf#f8ZHL4pmw-beyN#6mFs zz?JDlz{L)~2MrQQ`ey*1X9z@e)t55*i|WVhOXTj`0?@IBrM|0>io4~#3p_H&->uIS z7fIcE4}H)x1aYCnuI6fuQ8+26QDKx{f`vP`;RHTZ$z+h7Bw@YHu;4xk67d!`JIMLw zm4XQuzqiZ}u`VUR*Fz|l8>=`BQrXY=!rl$>F%Wb1snG&)4J|zh#D%kp@00hU)T3KT zY+c;_uPBXr?E|8kkf|3RP39-6d2P*6M;odd1)$Ww*0ce>F1;|Sq)t-rOGYJb zsafsO%ilH*4yEiwRV-DG2&>mKN*?usI191ywW|~u2b+#}%8x_@I275d*2oI7+ikWn zY+R!^6WrEM=(8LZ6Nx@n8Ob$i>1q~_nHDL?#5Vdq0p+FcP?)War^ue`)$C_BntQKm5EwG3{})T3`?;Kf;ez&Drs7I_={MR7=pFI-kDf;I zuGEB7r-%&%B?wppppf(s)(=hp&!wVW0N!OQ_f8kw*l?V~2!T+8@E+~==N6(eh~=fC zK8u%nn&XqzfPQEzgL0j8FbsONXYQ}d^cL7pOENhQt>znoxTtMwSv2YDJ(QZBv#gGJ z)m{ffzSW5Svq{|`9SC6G-#&<#PE^yk%2kC2hmZbvL%@ln7uYyOc-)aXRpa6fLR;SI z=eXs7L^4XE;(9!vI?~X*w)C;{sx2HDj`BLWOu4^>SYZeD^eY{t(pB+!z{hX71vJzO z{H9&v#GzU}8sRM{0E1FYo`P~raruC%=y%J6=naw?O6ujpbWP%95~`C9viG7L&C1EU zZoJ(+GX@Dm38i2kh50j!U}jQ`bqe&kBwSs+xWyR=daw^$+YekqV6FFEUNgCdBMt)XK!!=ZbEldq7x~5k_UOVM^@q4B-pa?9?iC4eT~}Y1{h@S(5o>S9@BY#CO{PNEbMe zb0p*k2Fpy25D^q@gd;NMvDF}!XB9ecr~W4zibS!5H-BFf_Hu3xsrU{arULL2vsF@bN1;2Of0mAhFnGS5$*X5MaGLq2Ini=UlN!8{+FmiE=Rm^R!GeU8ssT zmzu*E)xON)HVyfxy0i0Mq*ozwZcn9vhe7q)D3-D%WM##OF4ab#MNcUL&ed2!8^N1625nR*E*Wu9F z{sQzgFK8X9P8WA?jXpQ{J$Y^O(wR{NsErxsftQxFY7ee7zF9R?jn@LzwX$oZoxu!n zF(s%_K`SMWjH_NZd^kI$ko7Cu@n^8u5wszM|2+)+llAJ$6_3X{@4ePe{#dd+cND4S ziD4}7gDY{nL|UkXIZ}aDAk6`tN$g=)0v9_@nrLfT&|z{u4rk?-hgLE8#C0_WgrG;F zoOcpduPU)=?vfEwH|Op6>D-;k2PxDcQ6KAX(>o{ z$GF}}`nPJpp-DJaJklCkFJE-QqDraTc5*=|2q-N2EPVe{hRQH^hyG&b?^J{h%BdFL zLZqmr3U5lv?|F$Kuuk$1_coqJvmwn6T=KG4^4Va$s&8z2n; zNuqkVpo=y%7yZZ{1fcSb=pu0Qc&IF^LzE8*ouYxw#kczRum*>|><-0TsYewS4%gBe z88SX1MeO(~god`_?Ff@-vk4I_@Mg__w!9q-0J8VP=1ekI6CuKMsj8*Lz5=GA%>otA z0#Epo$FM042WWyEiAk4y;Y3YV%Ww`kjfVckZ<@x?zBykjuX-^?*~06oR;;z9uA66m zDMe2i?^MtI>N3tzy)F_MS2rM!V%Ee+^4T%3LC`4N-bN=avrN4|`a}PP6bEfXw89RT 
z+KR;ZF?n3jJ)FLl$n#9T4Cgw&Z)o5pjBWeB(=!jy*yuaqBn5r~=?pWwY|?GmgfaCNXt&zjQ*L^8luvi{Ph zlWA{5?3JW|bm*j}VlX@bL+2m!nPhD^h<`a?*EFhGv$g#yNt z?^3OZ9s>b1#k3H@EB57wxxxh8Qq6o>UX^2zO2<4ON@v~Dy@Y7<{@N9{{2#v`f141)B zDCbU~&H;V*fvh5Ne7gPdP5gocQQZD_gUANK>J(+}BtO%z<4~K&Pcni%N8s6DLNR_S zOXbG`N&1u}_X<>EQy|cN$AGqoY;rc2#2$bVB_|qd(T;tkU3kCWYk`?vi&0**q9j znxaNKgL+A+dW@|LCR}7cpE4%=jRW_#T>EW*DAaJ%K4Q^SNtQ&mV0FPX!op9BM=R{Q z!Vi2%ck5mzUi}>vU}54g@N zD2@sEcy*%+oq#P!EJrJq05c#Jaq*xHQ8DAw$`gt?+^5oAIF*iwz9MwmXd)~qmM~Sz z%%pY+9GzzrvJBE7SSa{gD(B^)u=ZtRB04&)!45;SI17$Sw?&5)B!vR$1s_c8h z)tQ^~`&U(RNkc~#;P#wPfr}{`;oENVKJBMvuEZ#3co}d7CI2;y5@D}L6wB0R%d8&M zqSjL!hF|@yCoOrKl&Va5tpEL3x|~~*e>FZ8q^%xo3{hsL+s+G=-IWzn1~OZo@*x_q z2t%vj-$C`V?`DvACiJpd^!%63$^Jq0igjttMbXvXDt-KLuDz)C-v+!W3vuH*|6!xE z^UGmBD;qsZ3`OPWCG4$M)`iCAbEH#%M~eg>sQwcS6uc)Z5qj_%cb};@*Gqi>&euJe zne=Z?Or0~%2Y05Sw+im2|5!5;#1=iEfH@Vp-lgD13nQvBoL#CDKl8%}6S1@dU1796 z?n(ak(19@upkh;e1&hIaNP7V>8SIUBK$@jT!(33}QUXRgD5t?NGE@WKTPi}gcZq?P zI!|9oF#x20K8D{ly9F4!gO2NxbThnm8-^9_HxN2A^|FAx(K8oHGDO)7+jNt!a4E$N zMhRyQH!1O>Y^qH!E=L^Zok#r1b4^XLY{Ds-0BMMPe#i2cSuEXJyuN;-Z?OFN1!BS~ zN>mRukEg?qoi2kJKuBwpghUqW_q{~iALPM9V#F|tM>|8WQK|9zg+{78;+ZM3XTl%B zMu}LU98$vxix(T5hs2r;K0n|s63!-^)NcqzetZK{rE#L5R2zK~TMBa7N}q2{SV z=0M)@B*&43MEkuQmqP+t9H{e1am-6CtkEyGEUxHMurQS5pu4IXH*rv3k8T9ZxLAu^5q5+rhb@<&Q z2baaV;0KoU>-IiAVsXpmgog(wSV@^jGGJ=o#=$N5oYP(Hbf=rze86GdNl7= zy$%9d+-anedbjdhuc#dmZ92*X)PnoBvgUZ9N8Jdrlkkax3|ZsP7V)GN#N-Cs88MU}6xB&E~%T>(Q+HWIrN56fbY}1HzRxaC-CKDU9KW zlx%30>NpjwEHvkns5_tXNSQ4n12*-#;-AAvJsAwVa^w zn1Avh>iQk;ErUc6v6tWO`C~{uBHJa;%3Or5Zp5;wq#^%Ap&+G<$t?1qcpK|#ek>0$g z16s?)au@($rc#7s5e?J!0{mt;1n%Y$S0c>g<9>dHFReg4+M{b@u}C*gGe+Ft{ncz44k8#apXpR+{BU&^OE5av zZ+kmE(GF*BrQ%6G<9gZ`JU_*>00-6@dFOF+8NanC5C>p*a9gLK|MY)hp7U*ho^7O!7TJ56huX!{!p6A5|DPDm8VW|3a`Gvr z4&UFLNp{)#h5)ce-J+8$lPELx%mk#^X~B?DrTjo7MP3>c&n9UZ(@ccNqS#9RCBjXg z`;`4O4{?aiaNpkci z+F=`KaqgWUR|7xjFhh^%SP6AvX_Q2CWd$XIeU4MI1T~u>beq9w8f5s6@~6=H#n@g( z@sx-iMbBpAfU>lA!CqRnVF3}C3jDaw9LN!tD)5NwwXoUtZD)C_ySM_rgdsIvR)*sb 
zIk>l5wJ_-GqXt_IZLMEiImvd+e1;sPzqvFpw=8i_Lbi&hryXgq>+P2aDl+n0xFMX6 zO;r8q+-F`7OWOXMLHyoUgGRbw4;^Zg<;-Pb6*y?1+&l%}zm;e15^zP?M)596B=O!x zrMrl|sZ3qiE^+>wZy{N4hqd|_Qj<3#>Eji>*|&x1tLGk!UEF|x!_CpR>1t8AX@62o_m43La$RX5NK2w`YI1~W1KvWybAm4n>_4pfaXR zkUWRZYxY}6L|Mc_!d)oE4)()Et+Bh4)3Jq2uvDFlt%;!TN^+~>s>|reNL&{!@gEck zMSFvv$W}mGR!k*#6#xY2E0EH<19+{ikpZIAc170D1?~hH6 zyK+T;eRD7hT#Wb#jBw`ct=m+*rcHVKL|vpQXLBp6Dt_HUaGJvUebT9m6-EZ`(1=`pBL(^fc`-#O zt69BEjjx8$J71v5&4<;Jg=-rT;H8&ekX^Gj5 zt&R+EfGJlCjV>(u3{zM7R~Qfs#2`6Szn;-mariX*QU21NVLpoKv1{b3+BR(r;gd3E zwnLvH`tiCQm`UL+pKk&qC0)cg1RhJ~>IdHl&0kwYfQVIVld&WX1xLrOIv-9X4Mjl?w)~66c zH-|Iv+C@vIp&7o_#@1wFX>RuNTq$7MPLDNwQ!a zkK^%B^=t5D7MmMI=#2rFypVy{H?iS@9vnn3FDV1;_Evspw967+bAp0yOqA!8AL8A> zPAT!17Skt`DSX1rjWtIRP|4DtPE}xIyGyWHi0+?lp{CydG9}U6`dHo|K!~KsemxW# zj%%O`JB?9JqEXK^DTN;4!D#A^xN@lDXkx*zGo*H#>7fLmmLfFw5$zhiCMr(U#DQhFdg@A87G?+}X{P~AC^M>Q z({&DOMPf~3z92HD!5!vsj6DqRNoN?%`bbp|6LJ>kuV{@c;vnWP_+Tft|Bo4&-!2W- z8}!jT1A-rYd^Je{AQA+Y?Jp$bKRJmh5K5Y-_y5|MUZRR;26q0No$iw#r_Tn?E|3kt ze%3A)#Z{fme>T@DvCHu7YyTR17sfPdTZGN4R=7yhl?+Q)530L})WcUKjVP zloz$>sP#ztL7^biw5&DZF&#HtdHAGefEbrQxFuswc6$ded-q zwRE%eeX;*Al5sebtKBl|?O+|vCQEQ%}uLWp@FTleP;u~`dI%5y>+AUBr1 zWK!{#Ea5!dP0^~60+@%6V0=QVKj*EWZx{U|#}-4`^b~Ni3m`uBEt*^^_z;lXA-xFi zMWnC3sAU1B6+-_29U1se72czi!uyTD1Y!^n0!>#EfAv&4DUM1H%dCB%Cs3oT$OxlYK@n!?jL0qqj(H*SvsFD*%q6ajt<)zhA+16<(NJn=F`mwK`XrbZFov!NhGQ zzb{s7s?0D)avkB)fDK_d+RKEBbBDdrs}XZdwozU3UhT!6!qUayAS#n1kXnO_^;zdI zu~hX4P#UM4j}xjkXQmU#OH94dFWP&!OO)EMG}G z0we@(TytU$qhG<~f_9V4`O^b)|8ix~ClKT}X+<>2_paAd7gF#AMJy99m-mT?A^f2y zc#H1FMJD1jZp#RwTg5L0?3Rbb$~I27Ry2qM{~2?m4Kf_e2t}s`|y*YcpX>rdqYDPH0Za z7rw2>k+!vk4F$K%8gfee&?n}qpYD7a+?kc7;IHV4+_riK`o?a=`tc#L901ptVLPVQ3munBfSTb& zd1Y(!OL6RBoaM-}oIwG)QZ0G{95VetkiF1M)RNPJwk)UytGg(zPCQ~CAD_9~h$Zf@ z9UXbZ1~+VrWCC!i_ROp7FF^~~dYZO0%!7X}X_NFGtM;ds0sQR;41-VG^Hw%Q61mdvL$tU=4_ ziWD!-Z!r{I8^}8AG`#_njcc1;{DZ;W@>X`{OsqJ8q@~WBpmDGQnbc5Y=#wI?7kRrZ*odv7u))14E(v8~}NEv#};=eT}R@ zjjU-wzkSN`I}8+6zHtlY5mHb{M 
zMj~3xPuK*#oXI`LNpJjOdNnKg)<=^dmg?W>-NHjIGvQD_<$u@6HNB%W5e#lW7le+4 z+yjF%9E0o$3Zm;zE8!JEH$h|2gOns>O2W@maw*Ad_7()Sh9G=e*joQQ)`)$zBFZ=u5gjZ*UUFjC!C*uuhQxMOm+ zR=$jIiQ{X=&2%msv>J-?t(fU`XG|dNVIb$k^Q>w?rK*r-3T|9h#>2+t;`dt@YjAR> zDTR|3%^wk~0FehW(A48l!xN*rxZOuYXE?FV8MdP?7&ezAh8?}QK=5-PEw=5r30cdn z88l(d{Va<5V(ekL4Ia}QVugHKEjr7f5wd!%`b@$?(00ptNKESAetCEtdcYa z?sP~7EjJ{luI}kL=z&kPT6#irP-9&k7yc@3xph-(GhW@36ST(xeXy7v7m2kRwozgC zyp7!qFnxpLA3NjBH@Xr#ry@Kf3EO0tAL%QAAMDx}#U#ZU$pNI2)Vi{wO4mpCiB0C& z;A?+8vl*p@N7|V^a9IAVnX|->9Jksi8;Q~MeY>_lN_o%5N?*Y@IUg0(xoFFz3G~}N zT#JEdwxG}~;#v`ng-@)uSM)o*)~W;5#tPTupN=en4mHzpDKdHXz{#4hb7S$`ew7D) zO&Nz~6N)tvjqOaUCv@G+ZNSZDN)CFP2hdCw6@R2*S8vgvubLIqU5 zdPRzc+9Ra}?-Nrrs<{0HFrNGNQquUC1*XGCI!{-V&T`)DSVMaL7Ix==JrdEi8dPVp_?<#=Dy)oha%hocq3aTV+yfH zo#O08y>rt_W-l9<#4#S+m6YQ7b#y}3F76vmzVC7t8O6i&)gEL^hh3|jOH$K*LR(z< zGm;p=7WTsg87=tk0RSwF-DxYnd-=A5zT!6uyD^;cO#^)CqMvY*R1e{!Z7}>HNy`Jw zf+A;2|85@WmVnNOO{s2hdE2@_oU7-(@Dn!CsPYTwRK%HlJtlMyg2O=H*tHr{)1-9~ zzR1-K&%Y=Cnbr?^`yM^j4Ag}dn)5TS;(9u*Jn9X1Kvhg_Qr!xRC_dzyVNvxU{5Znj zoyLwBsBoxljhc+wxk}^vM;Kg3+WUK465nScb~WGt@B?(JEqQ~s=UmJ1QEN~RCPOL0 zl0D5>oo*V}M29WBZa7VI8qxxb=j7@e6Ow^vSGAsLv-OAoqe09Cy>=ESWmO?sA%`No zeA`gVNm)Rl`SN|VBx%o@qQOaq&~Y5+%R1Zc*65f=b0=E^f%wY`l#p-aZ}({Ev{$(w zLA<3%6!L|h0JS^CC<64;4}M|kIQ+aw{JuRqFyW6H&6@Y|&;kG-w4m{8Qhiq!$!P5x z5j&A)Qeu9$PtUf)P1sdtv(IsmJ@7ZT5<6XwYlUgHrA6FPOr(X#jZ+t7a?NgZIleT& zi^TmUUPR~YmrNHA&5+#NkGqk78UhsNm#6RO#F;{=Gs^_BICI+$Vh{*dM7tA_nj0rg1u&p;)fzI4%klR_q6C&}exD zX*GV1|6F5V!^4Tbb8`L<@j|{7Yq|Jz4lOW=iJ>f~{b&Tf;55bl;0{r)5#qD$iUYva z@v%BB^0lr3!`I+)SRb1A%)1o5@G4-%)>Gg`{s4o<6QuOaQAxzb+OVZOQkuUr6Xuxk zB6OIJbJmWG>PNgR5cSWu6xRE+REE3y|a2-QAZk_RueM?;fEGPV}f)`2)YJAYMf7f=P~ z@!kY};JM4CRE#Uvrne}Tvp|rg2 zpE?nH0l2+D&1fQjIAR*d%#{I41{yT`9)OdFtqc>-<^{Rg>k|qePcC?mZKWJoR5~V( zRr_G(XXv@RuZ@^E-<-h{nZvDPg5?2FMfuhy(qb`$f7mS653!m7C=%7DHHg|W-LHV> zV!%VI+fuCboWB1dQ+3(vb=R?=(A8lC(b%R9=)tp%ywZq&v?k%F;a@5V$)BX%g~{3Z zC(SWJWyjM-Xfde5kB{jS^&c{uV<*Y$$5%_lB9gD8vd^2MSu76y6;G@4)t{o(MA9V3 
zqiSM{H#Fx+>(lUFs5l2YzH69nFEi=GdBwbeSlES%LIuA2r5C8KUDG6` z!Uyy(&9nUyJ3jC!2CH7CbI(QFo61N(IyfHr8M%>xiMJpn*EK=Jd^~By@mNZ+k3DFI z2e?yDfl#59Ff=K8&1WWmxPS8{GMx+>+m=WV#?!dqiSW$uS;o9Nr=gnT{59oQ{UtbK z#DSw2y%f6}g*mZZCZtKhFrvj{=%M0KCgrU;0FPLPZbtl&PSxU1FaFiAgzfaG=Hs!G zrdQAfa0zAitJl9nRUZ(OAg~Tru;pqUc4pRbk$v0QsqZ8*^nF#rQAC*4XmPtgo?^J! zb_SInhp(11DkG2so%)-##62sQbk0|&&4>nyCyIiX0TJsu+8e+_$Gu-Q13#T|VOKQW+lM7pKIQ$v@pr2~aV0GF9?Ra;X04m_&K(k5#3j!Ng`uM0ighj+*{Ng0O zFvTdndN{0$2CFHn~E^G$K?AYKg99##A z0QT~qmhNq0i4PGx;t5RzqyI_c2jbYHY>A08Vdsxo2+>`9fZb?oOBzAxDV+!h|Djg+ zFG&3vSkS2i{t9HwrJ#+t3?v7hRB4&s;M`ld@e|T76?n7f{rh6bSU;)u z3zsUiQTE=WiTY}-R@)SHF9+wlIv3FlsaWR%*O^dZqal1IVB#rmEDO#YaGhujpBo7c zybWP;T>5hUShg_@AK*~D{>#5xww8)me1_+!7rMuk5U+XLkD|$eGw0oEQ_ktE_o|e& zEE1+)PZu%dSLQ_+Z7mP|mtj5q*A;>A$9!QOI&aM0&_7BLeIm~KAq z?FgwbipZBt49UY`)%O)jIFykFyDI=~JCrkn(Ckl0hd=r)2gA25pVjtLQM#eAl)Vto z_w#h^rZCT|5Y&ER!69o0ndvrr0Lc-?fUM-4e}%3FdqJ~an7%&*#^=z6Tcc)1!k$J^ zWqV>M7srqrEuvSwET=FS19sA*G1J150n}YG5|{0z7rLL7QrTo}K0Mxp7g%7jXtGKP zADM|Hs+mf`VeDU_cQC_kFv*spjT^j!$_SZZR}nfVjf=5|YlGUjS^nCn`X&7MMeiGz z$9%O10B&vuJ@C(FTsXn~MC@|Bw)BKE`b?{~0~e9JyKE+$h~U8Yvt^WRuK99TQrxrsBL@WO^O5|^E;+1 za^!ffm}YsV{7##ZqhI2Xf5nN|r$UQ}x9=D(FprZ*<1oAUD@L$beVvld-DRUYgfKU4 z=eHD`8%nZZWg;d?TXKB!?H_r@+Ljf!FIf*yp@)!@u!vPqX7hcb6Q~>qYU^FRVk1kO zIbP&_H@O{Yqw|f+`yOu33n3R!I23&)~m9sbus<$%qyFf2HP}+{P8d;u{)~0Z=^na_&x$l9 zFN1H{Yn@{)9}kV*JT#M|wO&s{vqGp?Qd{+-%iXj@iI!W#TtU3y2;`z*2;5V=%)ojB zN1#Yd_BE2(Kb+@S6IQTWUY-`4C-g@dzxC#-Z3v>aSo|NpDcQe^V}s)zCT|BFwM-(} zeq&liykRo4%0<=j@KH@>%H>VTJMy|U>6fI6S$f_*Esb7{wgSO*Dt z#1Y27A&>Q2o5&c3hxPie8_$9paAv3Qp%Zo}y&I!cM<&VV(tPy56Oz5i0FL0)SdG48y~Ws()wz z1-(zD2m%Es*JF%4j3z|Om)6BcrmyMLuL_*>RZAT88b@ID>nY0I5B(!qI2_L-M{{ym z2@kkoM0mozox$eVOAXr{qt|>hJX~U|9GNvakW(t#6_wNqVry2Ai6-o~+e8<2H0HbH zNEB11%zfs-#R&&KaqzL3E43F_x&qjv*c9h(Well|)xvM8ecKxgqj4q-WJ;>gY*9LAs@S~gAyP8zYCPg>rsnFnu}g0) zQ9mmZp=b*t_-N|c6-vV(R66v5DWz?0<iYebt>0*Ju+78=`w_dC5$bO`NWc?Tll%tlUrGZXfDooTK&55kd!FK#G(=uO#diGB_$Psn1jIr+6swDi(06BgNieJ{lox?1Jd$cU~d^ 
zHZ>Pe>F+3y?^Qt2n8KhVFI`u8lu6}jO&efL-tkT-qSFLU-@Jb=b$bve?~DQYX1{)) zAn8l3zum~e>slL$UMLqGk*yU19VJ?!mMx`Y0vml7*0p~`8&)_o_-Obi1{i9+v2iGb z09%qQF!J-}K7#BF^vr#6NSY?|UH|V}GULZQ7acWA zqnip?Xf$7=nd)*Q|CPZ?Dhz}r!#Ah6VJF}PwxDBKkg znRFxD=HJmK;o_FD&ufd40&cxtzV+9V4Dh29>v$P@mLYJ==fv}a?DyZWU>I%K`HrW_ z3XK{#i}7|#=S>mxpP>#Y%r_bv8V`%h!lz`jw+^;w7vFA2>FwNI;{k;=lNlN&-xI;=pbB+_1Lo z`Yg~6um8Tg9T{R3#YQzrEi4ss!^gLdP=w+<7HF0xJveC;?4ImH;I=p!cj^-|A$RPg zYU{we2gIv`!vbE1gj?ii%4WesyXf+yVE#U)F@|b9 z?(~InHgjcGG8Ud&JHN!1{k2H>UZGHc5IV@y5Js(MPUY(nu+_7uJz8H`U0%<(iJHZ6 z0|;6L4M`e^I64aRRgAM0X<&B}`dndxL_>fj>uGBM`5vxmPzK)dJ;p8Tjjn@9IPKMi zo&vQN)VA(ea7 z(lkjm@63Cwulh+N{UEiZCa0B>2&(Ntk`JnCl!wuYq9)n>d?p;aD@`5UUMC%LDbyoyjS8dIw67yIMH zng-|D=0NqVM z-OJy-4xyjc&Nj1NTxn%Y4NuAi_OWMCpJS@u*!XlgGmir^WJ92g8%k^lql2WTP>nD+ zQ&7XX6^3d95UN@6v#-`Id%-`UB+nI@jjJ{)ckpunveCw-j5G&! zX+!Xh2zGV2_fW2U?;PFwzNbla3y83Uv_eq0U!*F!0g)JR*uo zZ=}KTVkJ)1jdq%(g6J0988oKkb$k*`cyKmz1qqE`;*D|_Zl-Y%%yoEw;6z*%Cn&Iy zzq|F~MQ|iUBBG@#15km%mW*2b{;MweUXe8k=`XyMnGiXIZFL=P%YN|t`W4|O@f!C7 z20fkdG?)o-R`IvF0`g0re#CZYFN|$msG%g0hucA(931ShkoH$e;~DPZ;KA%fCX~k~ zmR0C#FIN%NB_&-?e*96V9y`DiUE4$OC!!UB0~;URR?fmA9{11-k@Oy+k5&`lLdHICt{~vE%Ii}!s*9Yh^;AVUXZ@3_;%LnTfYJ2 z&RPz2X`Z?UdA!K*y@LQeK*PTZpH(GOI_8Nyss{Kpel2VL)By|cH-S<^-kbhTpn?KK zvXzb2%24}39yO4j0PZak1F!tX6SywPbg^jI|7WfuCj*`Xo?#2^3qu^!%9lTYy4sRo zvK!wZIg=v5@BkLWEN|QR!9Co+v7o20lJ+RSYc^unxXp-GM1fNg#f&)BMAhY*mL$r7 zk21?=2aE+Ze&xkdRlOmG^D5GGQ|b8k(#}`>U`T)4;zsIio{$JFf^)b=AARH5!RH-u z_dg^lnVe9qmbmP~F=)1L?g0lyIQ!-XjRsX!19lsv^a|&q`Qtb=-W`?W+CdrR`Ttaf zAlJnDIlYlv8a6b>Q~c_GBA_4ofRBIEv2sgH5QPd4(HQ0}{_Q@uxr+y^d$cT)B5W1h zpEFa+C%-T@8_6?Ar3OttK2+)VDE;1%q4neurY2d%deV|kcz-J!$9ZA#ufb8QiyMka z|DeS`u@5FXFJ%BK?6xWpGHlrk{jOfvQQ~@(T^@HWt{DeGT5CEy9~(x!(+U8ddj1| zOj}l{6wIEC%}hNe<8BQY3*FShNIr6exErp49Bu{IdAy)zs>uXFm>K6Me{15Xu_*6Z z_Pr%xffQ=a%zH@u=HnbA`)e=`tR}via{W?^@eRAbeZI)Srrl3qOP`)PQ(l>Wm5OLy zw@#S+(E=LCaOyO(sEIbK_ll4GUd~g6e)oGvIujGnhrfR(q||OJR)}CEoJ5JKWyQ=> zQVw-t9lZkQ-r0(Mk}#zZn?`oz*{*;7@fPP|jqDY);7D71`4)fIv;0OVXL^(N(1Zm^ 
zm~1+jA_)%B1491td8)3s(v#G;KW7`*?!@anaZ-AUm|{HhGaYh3vYzX9#F`w3VsLWP zNUSiB$M(`aE78a&FqK$z3lpui4&@*v>C7^QUT&S-vxj9Ds>(t>1THD>CREcLbR;wmpA5h&orV(Q$p1SA zeZqAzVCB-k==9vgJBnnMIr&|3)NYW0v^EuL0xH3XC;!hbQefXFq%0Fy%7n``H~2HF zAk^PLXmP6-04hK07S-PNDWHZAK ze?cR~RG;);d?1;mkh38%#<)yG{j(~n5kiPyJOzA5!b+X#rW(n98C1gEZ2Ez^k?!EBg)`NL4 zt*F!OI||RuygwM~rkA<--WyOt((Zu*fy@mhD50V!6OkQ^g{tzgOvrq_dqbCBZ4Ev_ z1A+1gpm7R0rLg?>3{SDu%~Qj#kzeDPEa$`L)o%v(`}|3~k1r#pD|o4nL(SaOO9W9isuS5Y;X?0l9AF6OTnM*)3L1R#l&&-8;#BbvnEdf{t!iBWi^jlE zp+2LD{j}`lPas5|57s|ECZhSzM?Q+#4^1q_$g<~l=n-8M^$7iIT%i46?ze7jqDwZI zE+!pE5i5BnT|50oguy(+LN7FcR&-g8k?`41h2*8KDcFI^f4{QLg888u8iW0iU@En>~G8|jT{MzNz zR`MJNx>oVe_vy`tJ!Xo=n>?~IuIXyQlcfQ6OSdu;k&8+)h>MMA{zNC0Jf}%^Bh#SW|q@1hC&f2v;CcXjxUAoa(Krdm58K^al_E)BKuHQO8YbC~v zk&6W1+{>T^za}D$ygV0mncWQ;bjJN&efXU6fl6wKMr=WoJ~5y{Kpxz&r{7BWR}S74 zDZ4GbGqS5Ci47aN$pr+W#~Q=4wHHN11sD(4FjxMW;Wb-W>Y51NnDy#wAb|a(kFXgt zClxn{=4wZSEmb><@X;;sE16@#Q@V5iH2F;4;FL#ZW~@4%b1S}j*`oME*&`!lVDj+V zM6S%lul<9QeXDs0ZWms^91_d%DR?9|p0OSs|FoLGicx56nMqfMTn&;L#<=I`h$)pk zqQGl3aK`+r)HBkG>h>F)?#z&Z=pgcn@qqMNeovQNCWs^>Y!Y9|Z* z!SgA|;Q58Q`dC>{ziW+W+mw!Fy+^-0IljA|fkQ`Q6(MM!JG*E{mP*J}t=`xm6wv<` z8_-RjG!5gMg@ItCcH2SJUTV%0Kc-yfu8#TNi^nNQSi3x2>~FA!oU3Ba005>wnB#pn z5mlgHS4I+fETir!z%*&zZWy$}@wz{_8*?o6AFX0j2)F2+v&Uwp*Rkputf)0`W1A!^ ziAmlT@(80xFFr;cMx1xaif%qort9Bcf<2(TKa4 z$?1Msh^7r46aK38vR5rXNtx6ZGdXof+6nHsbcM1TNW!zuRq$AsJ8qIhdUTrOc?OB) zwb&G)j4K2H0iTurQ>m#DmN2|t$sYya)@X!^En^X#1J2?`O8_GjMZQiHdAaWg6Cw3i zbpE+sG2ckd&?xY>z-zU>U^u7n1w3xx*LHzfkN1_THl)N)H^><~K^YqUTa+%_JnTAf zVE+D&8flKFx3!t5U{l(A1u2j&Qi|@}So}Lo#Be;hWbTQNQ{8V*2K|$f)p5;Ajvq-s zckSIKYcVuI8Zu=MRPKopEP5^wr?d2Gp%1mgZMhg(-()N_8>0$)=;M1lI7E~={1&Y4 zvAOuXv;UYF_+^31WdmY;z5gZ&Fv$)L7g2f0P{jVD!~j$sXVb}D-c!48juK}|l3GH> z(r)`&BPW8(n`f|j0aOp}PVi3*cF_F2lU%ikmEffo%p_cK#|`vsiK#ctv5J9sV9$*& ziXM1m-2HiXF3)1yYkiFZ_xYPZ<4`K&nt~CCfz<9c9Dw^*#_I%QD)RF*RwLR+A{ncc z2=sTEHLr>SlQTkL8;vz+{!RpJ>5wy1F|4V*D7$gA__Ej)LmrIpiQ}9oiQx%d54_iy zlDA1BtDn}u+io;wbahuZQ_Ar=R!mWoG;e|)pwGDF3^sGoHbJcFED_f3ZVZVH=TFNz 
zcnM7We0+Hec;`zuVj^wqD+m6F&A?W6G4tk1ve<-WrNFb>S7P%*>jH@737yOrNgNDM z;`vW(dw5EQ&tR-wGa@q{D^jJ{(FkP$#fEaj64=eysT@vD*TALqen zF^FXLsS58VV{8>jhgj2zyaBZN!{+|mI!dl=F%l!wnO)GrL@wToqb_s;+?05U?^T<> zA;PF^ROoBo7FmV{u88R>sPT>oH%eZQx&>kXf>*z1^edzD21E{a*X7OOg zZ}ppy@}pTtW-z=s-G6?*1#0nAGDa^xQ`CcXF_77I z_mHk;s!-4}Q_xBp&l3lzzv>y{HgAme)mH3xZi6-CLi2d!Cd@^dbW{Zi?7aSRYN2N~ zL9=O>04DTzJMq;3a@m?d5ZtorxdxZ-#k0)nT_Do$pGapqORw9ySqM&aa?M^rO;?th zAWZE;k6oSAC7tMuA2SzT#Xkrh3t~NYRttg`WrHR_y_53Wh#9jbPJJj2s{hMzkkr~Sy%uvO7!%}ry0 zQdL+~;3UwVzZty~oc(MGhYe;BL$SM5JKaT0t})1>YBbdWD+Vzlb3ozsd1mXo03Qqg z3=v`C7InJf-Gu^2FzVML(*Y;?TlGw}+!=P~W+<%;p94kNpOZfK*L=P{VR4Q0*k8XU z`q z|BWC}hxzYJb?(f}=1#`B16B}Jjsfk2e#>D!H$SxYsnz|7h}rVve`VE0jJ!F61E#Wt zoh!?h(U6u1cw;8lSHpL{4{)^3Rws$Q?3;|nap;|5?|08Gk)_dmV#j}#@Ke3I+uh_} z5j&+eI1~PKz3CaAMh1d+Wc-M=evH>TPP#N;_z4CeXjEQXUMOv7xKS}DHRY^qrEmz3 zE;-8n0)W-HOYJgAH;aJTMlK}rwLiIBN!6q>%O>k^Lb9SRlHkd|j%P zZ{$)StChN;B1hRIv?aPysiS1`SAc1?g5L<*_s%uVEjFHtwOMgB)Jeo z{5uMfO;X6Nh`Bc1Ct26zcX)II#;ZCS0Y4`#)@WLa*iAinFkQ-U3@_!9A9D1eXzb{> z05)GwsHR=ut%ZVX%N@u6J3h(4zY4F51sxFv_{mmWNRj{Yi9XAV|jcPKjAA3oN*@6z;a$)ZjMGw#D53h_)f3&I`BmIIFke+>U;rD zy&C#Ag%LPPI+`{%Lu6Vu86q@Wzpm2#j&)x;YexQmv!9mi29ad16L&U{kOYBOR5%D zWXe{iSJD=J2%4V$tn857iInSXaI)wM4<{L5fWrI$7y4aiJu?8grOhKr^Y{69gc*?S z`DUn(`eu78*|-(oy)Fx?Vz_JK-9>iLE*9-;y&@n?Gq|gHrc=%EWTC&es?v(qgu+a)e2s&P4l*;y9F(;0pa4EEH^kp z6Z&c?2{dONRl;UT%+NA=z9+TxgZ;<0PHtz|z^3)RUwf6b2f4&wzWTbjF;l##*U0T) zx+&&qjYr;8kz5y$DvAVMRPq$TmiJdy`-&(A^y((1>6}V3Nko=8#Ng`tX!|WFUfEE9 z=#1`Y2_a(>sT$F@N53Y%lY%q2jSBN3+LfY*_QtAis*PkZ_MuZ}PJDibEov$XEkwXk z-WAGi?z)zVsnL{%?Y?-#((hrI2m*g|D$}lY9)umz8fcjQN`)GYKl3oF9~mRl;H(U{ z<9e5fq+11Um`lntYwpmjZ+tv&KbiI?pm5Jr!tD~Dn_FAO0gmDie#06=)OnlwQQKhf z&as`BgnVb>Bt8b0)s(+@dD;2d%U0XO4L({zn*Uxysz@=gKTp{rfR{tLP%= zRC|CqvB*t|_B{@rw>@;{*i99O{f=q>mY9ZzX?_yN$?I4TxzG@h@@($EMD0wZ;~;#e zltptLTGW1Co9aoxcGuwgR$nVbHJ@V0WX(Ofk%Ny(4|+NIAcQ{$vv_}}B3j8jYMp9h zyI8I)K4YerQ>%sK&Qw+*Yv4@V8gi0fbn}(ZtFgjWv8G=BJcXp*@AJvoJ01Y;e9FpM 
zugyVjVfKfc?0~pIODK{ra!1AZI201VN3(Zari_b`%VxPU$Dt}It~&YB>Hhs(D%pBd zpcy^jh9^%Yj7Cthrh*YRk%j49g00$`tk4>jG2+=!2oT?zx@UA8XhV$vZ3A6dMEO|g zYqv3`>1rx{*Ofzhlh!^Prf&~`l+koe^3XV}CgtG{GHRwBJ~zfFiR|?aaD!jlTWEUE z-=+6>pM{E6Pa>g?i+!@P2+>tQ!pA5RT24`J(1D9lFd77IhO4{0MH}LX=jBY>^8o%9 z?SXMKiKvp!H$}75dI42Po(2nUg%Au@X)Q--`{2g{4(gYn6-9R;bx5)w3c8v8@{$i- zF7_mx;;W4zRinY|)t1XDbTj-8v!t_!4e@oTAH?&82ip;Bo=cTeeM!pC(^&0h9ZG70 zXXWgcS>igzc>Lis%2V%o*Tt;DrJO6myVi_-LGA>kA=D^L`j*jkvX9-VJ}DFDF$Hj% zyLfXm(Dg??o%n{;r9>L{D=%#FgYTT!T)U7x0T4;u`!9<{lQrpOFEUb}9V|++h9A$w zDz6+u4jhdwylt9(qcLHu!ub)Mn{**J_0iJ)2a$NJMDq<32(<6P!vy|^<6i=)uxYvt z){3S|jh3a7k);u;L3Yo2JYCH@V)3Wkhq39T!+Blk6MwM1wV1oESbiCmw3ec;>2Vmk ze>Rg=v!iqL!}_+Be{&8gx(^95z`Q;2`}#1oTVNGfFeVeVENg>>#5#$g?o6mGaP2?q zjz+Om6t?upFHWl;s-FFB7uU^C%+;F;)c(0X7i$j2ekoihOKClCODT+)CbD3eaqt~s zJ`BWe9wi>tA!jrpqCl0S+#{R-VLmDwt9}UEm5Q~rqb#`6S71c>Bpj@vRia9W>Ag-M^7T#}uBNq53hdDt$(j$T*jUHHG%(J4MUSOvVf$Kl6v|$P@X+Vj_0J{WD zx+EFb+>m{ENNa{^EW`t#QF%;OE{W*<(Ir|{KT|g~s*`?b`=2fY749||ee>lBX_gxy zV2CMyc5Ij)P0ZR0HKZWUvLOX=12?c0YnNxiJtc<{)Zm>G6oUc5PA<%NsuMivP)};R z@5(=35V-y3E2t|WEPi=J;a#6LKM6U6?y#$%A#Qy+L;G57X>O|7-Xd50^;n}BgGMz% zlbGz6G`U9!>53z?p69o~_G{&H<6HNv!nH{h z&WpCwhaa!NW+n^1%8!VkMByO{J|;iD5yM4eBK51cR7SKg7q_s=;5z&c)vW`Hi!i(w zCC?E>n+ICSIGi-6rmp4BQS*$)0BT{O+ciS@EjE&+(#Q=r88q8Ef%M(Cn=Md3JeNn% zRADiahoMC*Igv!8m#P|%HC8~t;ZU^7vb*^?Bfge29HaMZIwB*fb4s|?EMEIt<6#}D z*c^>KWDVMD=8iTH_5|J(RB6ULO_sL9z)QtPvjQ?AAh*Y0%OTffV*oo3Kh7ZZiMie0 zC))RFdK*-!*rRuKCjSv7Bx4}gG?2;BDa9^^fuV}X^oHJ8Zg-OvMC@m zAL|bU;3fV#m_r|t@-AjA>Cc-5o$TJ&_IgD{irJ003hdRlL5xbYcscyw%yI~xw$i$--tuT zPPEq-JaBs6y{Z)T-rTrxXU4#kSS;)s5m2jZYOrDPI6J_MTD)X6UEz>yT$YmBxxb}j z@>4uB=;YP_($l8K_5`OToEsYZ#xEfva9eE7ozy6UPenLuEXEtOc9591DKG1-kuG7= zjBaS`Mvu{IN7P~7GTjme=cAU;JD+H8p7GRx(WueAyDI!l)&}FxSf!{KS^={3qEpXo zm*>J5e&#j+Li%4&eTV|2^y_bMzpK&{6TBzOEtChR@|7G7zFM=b0TSwHaAjNX`k&qX z>H#vvFpJKlJ>k!4Sjl?zqB9ZkUeKw)d+;n3)iXr07O8vpr#;Q+5c%BS<1u?>-utJ9 zj?j14*GJ)*05#>l^Ue`vORiQ|Fl&ZDCI;7QD~r<)%!VLMkOZDX_JF+>i`Fg2A(7%7 
zX7-H|umY;&rz<7op^vLd81z5jDt(iB0&04J&KpO#25(gZN?fo3l@@%bt>MZd#Kk38 zwMB_CjO=FmI@5JeR4$N&>?IBu`_cMc8Bkg$c%%`p7nOSEGjsN9+wLVB9aWb}9n3Pd zJ=g?oXRxz3HR)KB)OPhF_!}GcK#;M=TWYV)p%tyt;H0jTvBPEGg%OUSJ-aTtOqpFXn7V$=gBKaET+v#$LMyjcQP<3H<-0vV4-41a;vA6%! zG43VJu!)ilF>du;?+E{^IXEM_cSZd3!wqHRKB_$LjB>yJ6bM{iA=EVQVJDS<0HyoJ z=xy$;v5B2>L@=IRcf&<-8YpPf*7+)g3o@*(L|%Z@wL+XZ{D>)jYw0bobQ_MlRvzHQ z8QxA+(f9lz`T;qJf|_zOYSW8I`HTo`GS?o)kRNTjc?8tZ5V zw7R~CpsW5tY}?L!h6cFd??2Znes6FI0jKW%-7@#%+u#r;H9RUaudw`<57yEUvppF? zPu}aOT54?LR!Y|hMJVB`b(kD0znj8IUo1=}8cdMDt9vYJMQBv&72y64)L-nolr_(l$+B9EjdmKD%<`*={W5rVNC$ z1qPQHCn`T&4Z%DiFA+3TKwfU`MIF4Smvx^mpVn-uWLTgQ@p_5P%)h?_GOO;=iK0lv zKiZTG-7kEZitf^aiT)8jTCC}U8mus#KVFjDwwg}*i`Soue zP#rD_9N4O63#88hMr>A&p4}n;!yjhH_UjN~))T-)_KFw$;kDzDJi1}QgY?1!mE^NG zx2Z3^HPpPT5H25oUGTN^&+bs+kfefkEC9jB2mjZ!j0sfOMynj4H7UyqWMe-_LQYZ~ zx_oMarCk8MB^lUce$?k558+FuQyGd(w6BmQDmA(L8y zs<9*=z-1$CTpzIbj*ds*V_8kJD}dk#LM;kICgTA{+G7g={bj%G&B&2>}QIkmv_$X`l)JIBYh zkmTQCL1zrLq;1!j()lbiB{(Aohhjb3C|BLRqLpfw`%s=CrI#H5)hdDdDb$Am9w zGrWLnH#9j|H3HG^>m5ITp=XU7$I`cl=^`J3pcIP@xIlg!4Q|^-i$*=-rFD#V7&on7 zO@44Wf@FRc>c{mK{q#71UdmpI?g+yW1+Dv_hoook9>F@tTD#XZwult`Hbd3VbEJD+ zbB1(Qnd-(^aIF`y$=)`FPh|-8CVYCCc4$#X%Yuidi{#ZLeIk+Ke8g6j<*u9IK*sC9 zpug^pB^3zGiqvH!9FPk;N}A{5cBbgH+_i@>rPn;uw3SfoP~U{e4e;;=X$rJ+uB`9o zo2rn3nnU5{i(AKTjgnGv1*8rT<{$MRtsu`rLTivsWZ$|egcOTf*FK2`ZKctG%a}=l zv_pTti`P_6hGvaci~2i+tnVeHr@vXlM`@emPZa_QVsjtlC z{AHZ0u7*2omq+PgTEBobIlt^PX21@Pp5}s&cEEUu{}RzPKc+1knS<9H5q-FPuhKdA zLL(n>%3h*Tg^*Pt^;NGy+In&Nv!lLVm<8H!RF(PdG`L*Pd6K{Ta#ZkG_&_b3xHB5s z<2}#Q&}WRr3Ayt=Eo_hJ5c9JD!Qh(`_YRtm76^4Z;^wxwN4gEf&tW!yQmc# z_wJ;p`ar&YgY`M&_Su{&v%RNU!)M6Be>gmIAOdfRw~d>o{w%O%#-9+&m7sH{z+1B8 z0VWyhby#7lN;{H5Ei_VjH|^%wrxGv|?3tUx(A28m--jm%K>@8#Gt;3}nCkpXLp(%33q;z1!Az9vy#!bMY!;z$NWL2G_$f5= zk>HmyyT43}U-MG;LFtK{(NQmq2L!JQ1bjTjR1Cgxt-(V^9QNTYd25~i$gWO=4yZLgKrez^(K z;EHa%qjFhOIUU%UzlWev_TQGWM=9krgbjO{5GX~{aUmSkO-&NZfXYr_=nSc)B0|j} zvfsCK>B*7QF?@PSM^Qqb=vf8xMflqULuA3wT*hXsAPPbGf)uP%cBPu%#_^nU-;X}e 
zQlD6C1H?}@_Vjc2QKzD)@xn{#8Cl{kB=bAE}Tsicu;g__Zy}9tyIQsx__)(0bD|8yf1`fb7ArY zTl@Med`FZ0hPAL}s{~f?u>C#OS8W71qa?SXG%Sfmvzlc|Mp&T+0mpqTIPDtA<^bqi zpumo=ge4h)OgTl9B&E*1Jc07Ep(6R(^f2C80PJ~wuj;|czOldscc}b=oA9Pk&&gP$szvK zRmV<%_n9j@W<+EJD;O$=ExiVK0P|q{_6)7up5r^Rv#`D;GaQz=7cdXzBoJ=Dy@|P@5R6BfO`J zop`O)>m(yAfI`&Cn$ev$aaUD3^X2^LoJorkvbg-af*kZLnd>7`jl7GuT(mU=m4JNe zTge9=r;zTpg4SxXHe0Q3o_MIk-+zA}eKgk5Cl@?lYu-z`i^sj040AZxWW{(nF#j~%;v%to9a;+6D|D!!4 zh6FtF%^%F;nebSw;@&t(?`SJWQ!j_#E*S3lkMLC>snO<;Dx3|K|DgCiEl3}wD=Eil zUwBLiHs4YCfh4LZMQj0468BY96=IP$?WM4a*qlR^zNHkx@;EZG_sv;jEoCV#))MK5 z?$DL9GMzX}hxySL#2Nr@n$+hx_=8_(w{>R7e*mfXHP|=P6eovJr-`)KkELzK3FHMj zh4W2HnwnW6DH4m(KiasCDeZqy;-T)12gbm0=nTZm2Pk9}c0HdSqzDTVQA0Ko_-Hp3?a}_asd}$kTi@RPWgz+30@}=$ZsF z#8gDsl||$28$e3l>mZt>kC1)e`^LmCEur`tHX9|vlxk(M}U@dPPQTCJsr&w{F4LKzxN+C1O z`B1Z+&5!=eI`j0tF3KM{Q5#S{ zjP{AB@&uYNx`CdV#g49lP*opH4KFYK*}SEu3TZhYKz5mr4%~#jIy)Vx0B`j6fXVDf zvO!FixvhwO$J-3ZGgxcXmtoekagtI%Vy>DXPG*4<3XqIN-z|;(X&fN^^7?rR$MOg% zl9g`u)Z#mOFH$xYkW!*!h^vC6K;jPk%_X`vXcEMr0>#Y5RC>}-e1z<-VOw|{O%(Ix zikY_R<}$2+u^x`=Sz=KIsLHpTw8{I#B>p#t*F`yiIS0%|8k<4z={3+nak>ieN>j%X zs|Yyr+9J-W)Y#nw#GVoU!w+Yy{LB9kdsWfQF1Gw~JkSlqU@VK_OI77!;zvLYMh+Se z!g=A(>%+$Pl74hl1?&qudqrx*7y#H&M8@bI!yTUVK@ZC> z5rG4V5@S|5(H@OzhPCA8{x%d_Zh6LtIceWiSx?_I;ax1qC;;JMrjN3gN#9RK6dzKg zSi#>Wb6lh1<{$?De+#mHb$f@r18~uv&yG)sf2Azov+t-Q%Q7scTJ{N1;x2z}wxlW( z&`8MeV$l__uWVm`ES`CjSy&xPW^*rjKyj-U;af7NYAXe&@kk0^1q{XMGD`~DwKB9z z@B|Osl^%mCS9)wRcc@plkDe8`-5pv(osyP7bVB3QCP?+pAW-MXwKo#PQ1Gp;K3L~3 zSxbkL!6_`PpsoWF{xtO#O-SM5MGc<0mEqEH4*`sk%73_;_QNW;9hh!FJ9f48Fx;-V zq}mf>4cb-{+;R-7EdhlQ!suFOT#-WHLa{Gw3Qrhu52%sM#5g_7xn;vi!XQtk8os-B zEnT#`4>-+4POwkTMNqchl~)2rO!T1YuVs{TN)qeB{l5KsP|N0V7Km0rNiK>`HUhJ< zj#a=<3T}XXNeD52$4w{Rj{%=qbX)0H`{QIecaZ9hTP<_3o0`oVvOFOwn_QqSH0!-s z%)L>-eXzY70Prx zg8Z>DPLh~13@Qx>sYobq^lNoBzjL|yPSGzj~QTY%aKX`Nt z*FXYrE?5-(nL;{p6AaV<--(O~+;UvCDKnZM?w(cu^Q_=qx*srJZ86!MaLw=uF2uE2 zZ0@Z_n#gp_u8_yYpSMT2EH(70_oE1e7e+!L7Z!l?)}H(6Rp(QIObot22K!wVJh*>@ 
z$aK`|(`0V5WC10#=&texSP=*V1jCwJW=xsgahF18k86p!%{w+-U~V{JxfK37oCNq| z{Xt>gK<9P~Nyjn;r|*^6t^F@jRPcA48LEMFOxz`SE=a)lb;NwCSkt5yZkS3QCsrQf z$=$TM{NP6hzsbR|d~#)g9|(H4vwhJUF7p!*^WWsf_f@V%&=&Rj=)vo~A z#5nyKqwB>5X+Hz=VSNYJy}Dlj%rdyuCx}$tDU|o{i<)wK<=+mtD9PBGj&s>& zVQ#2GRD#uq;;4R{IOAk#>`1CFZlyY?7;X;bBs&37obKt0$F4MXkej;*VibMCdV*D) za(#o<&d~4Lv4FsOq=e7!J093&!IwMr=3hdhTFR&8q1vR7*c1Fk^NhknMlIvsvVMHI zKmq?C(Xv4qy$*ig{vcts>_xv09Ik9h`JJ_*{9iq!5$2bxBwg#Fon57I36*2cMTHx zKzvCfajz~P6nRn5+u5?%RQyCJ0Rr((>bd)gNPc-p8#AhJfiyZ3_SM!Uq~F!6F=f=v z&$fIMTw4!*`^5VnZI9~uLX(XRG*8fzSGa&zmMbDN=9%=xe|T7MB#{ottE~g^ZWPK9 z^~&0z;if@>=>jphERf{m1l{P1ZQSa!vbb^ub<`h6FD_3WFRg2n{4hDJm$)qLN8i`q zSN91&z(8Gc-)(|=O?F9~@vlxVN9fy2)I0KmCKQf9YF+pOQr{Ufv=1?-3Gx{gGhRo_ zy{=LP=o5jM)g-+2ycd8HQ4!w5TAn~1SN!=^^T$VT^1UC5``> z5t+t_q$kcFcS`^OgyOh}F?mxz&8z9QVjjXc4sWSl9ceLbdKXcEEmern>HBT!tI{oL z^<6pXX=5?_>vw{<`V^Chlf=7&JjCALZsI1B%T^a|qTI@nB8+NslOtM`3PcsEMyiIm zoG-*y{>cgY=b@aOdw&#-RpnxeTk7VDmuuP6wezPYnaen!F*S3jJPr0^q>1XIf`2`# z5JJ3-t)7!m2?H6x9{@Q}>9Lb&eo^oH?DPn;H#1sAih&z!=}}NKZ5g|$ChND!Dd1-K z9^s$DW1;8lL{)AFNe~x9b7@P6y_@pkLQ#zh{fU;5m6dP&A+A-3BX#(L;Za`qQh zt=Yp;mDAH*bd`AVpb0JXZ82&INq@YC9w_FrquLG9ILNK7-z-bpGEj(IC;za3%jY`0 z7p9Rp|DQY$vPAe_y<_bz)f#X?CN$?Vck9BUkSBAK8|n`L)H*{0K({5**b=%&75dAzEqO!u0Bbiz=SK*&nSLf7ZOJN_Gl9`lZQ;24r?UUM`oT>KC(g3xk6TQUwN}?*a!)`>#O25y^Xqo!J-t)m zG6EnWt=j}$%xBzgwXOVwY8lqa2NNCZ|MbfqN<62dcS{(-)#Iiaj->|E+yz5_5hwSi zhz8^0cnSc#@6ApjaMl;gC`4=K1bmyUC5$La|1AO941Fc3pl4g5qkn--dbc_gnT%C4 zK#3+M6tqM_oN#-6a#w4OPD(PY(GoAK!hOxe*foG9sAXVOh06my*qn^`-ZF z*bZ@9&zJ%LXeDoHQ~# zfEJq%wp{aC#R418KFiqqyw&CB6BL&=)bkA*{Llq=<<+|7Je_E3Fiz73F9a^Z6&K|6zzEKNX6w&IB z9+CHOFy;-FQ4Hb%U?^a~u~6xP^-0me*E&a6l7Q2xtr-&M%L&~RQ(2AzJ%C8m3gnQ~G){9d!o zbSCxNrrCPsc2b|T%i6IW^NGG+Wg%_pda>R)uVbVxN?HmiisZ4Sf)pApsPKPztB4W@KCw|1Gurgu2=OuwkdI1Q<{WLr^3my*!q zBs&ix_L>?^#=0YeB!iKjnYdR?|0u$M!SiPnC0iABZ2MQpPN!OJvks`Jb}~Z@e81?sAI?_6h7cWvt6xdBU^X>c zJd$GOFzh`t3I`XD)L*rs4RwGtSkh!h$FFpdU)O1dYzm0VZ&#ia8^ynJ<*uETkDiEU 
zp$RFr1;ZwDxmlsRW_{NB#qt-^r}OG6Me%F9d=djaHrBCv7|#r~90Lgc<;YBAxI50N`lX3^z4k zUF4#){vOj70VEwH5QK19uUFMa#0%}Jm4f~f{mrX)_{8I+-%`4KtpK%g;iFF{7Lima zxnf)lvx~lRwhCWM9%+482_ET3q*}UKcf$Eh@ncqE%&xT|@usa;01a2=W-%2tQ21PlFwqCI65rZfue}v=@N-T;dG*reB5s=^1ckix$&Yybr6!~JyI`)CX=E4Rjt6w*er9yYIx1ez~q>4{_*%WuEIju5O)hWhh zOqlRBoN0yKUK|Rug+wEi_!Z8eJ)%edp{NTBeVDm7N0V9(@?&3j_C&H0SM=zN+wpf(k`X$GjH?57Mb)T9HUwWs^%)^kV@U$N{AD!!BMhyik?C$9F*Hg$#n ztd0R*sS|*|twwMflSn!?$^6c3?6@2k82n(aTR46uUi#ukP(=KKfDj*ix%CKEBKRn< z7PQx-{TBK_2C|@=&J^p{%W^)Ro1Gu&EaZ^*`h6aZ+a6_^3|p7xw1iPazAHiO)hk&^ z@#z`)^ORBFbKc3ph6cEhi4b;|Qv|jYLK?FMs`X+N)t!M7zCbO3j^3csMzaxH9c@+u z`qhH+)VZ6UR~Sm^deKu(_WwfpSogcfJ?wK9hCBuvMkM1F+sg8H^0W>0a^@v`daY4I zX!x(rlR%~$M=H#X=!c|ycB~KT^8WuEimR`HO+?rSLmiL>JXxe)`=_@+Y1vsL0jpAr zQMY=hqZ%3T`p97<`N7+9#d5|!qc6R&R?9|VHcGGXU-7o}!lnr*EIc_BxkX5r+&U3H z8bD+FbOm{d_{Y-^ht00fAaIg0KXJN7J0XrD{v3 z`)FwqS)aHWz2<>;U8c?QD37#TxEjt)$^8R|^yo3a!s>#QGRitE=n7IE+X7*r4 z2teu9OEgRTt{}tB*EWmHFQ~hYC$^W59i8EA`Na%O%|Ra2=)!H+gLsMl-uvo~G}@sv zB`FRx?OAe9lH*(JLG?*(609MC5^2;!T+Qm~3`J~>Ll22vg(2$PY4>r5slT3>?1K#M z3mCE6Ud^|0g1L$v>s5amejISdoNO|Gj(|ZzCrk zQhmm%w0Z~u7}zti%uVoB^Qw%TCV{%Cym>*LRrWNY!vtZ3b0{x@@bB+_D2D95HdSNhT!RDags#R4;4pTZs_^II_{K9y~*LB>!ZC@}K zmPObg|M?!octwi8IspJU9-LEEoJuMo9Pl+KkDx3_x7rZ3&L>-pJrk;I5C_DM0kz2U zNh1K{l}z16{H_;f%TZ>?md>KHa3hb#|0_y3>iOqLE?h8cKq?nSpSVbUQk+h9S5G}c z3qcZG+2>d}$SJJpO1Ie4qc?U)8zAN8yn>lb;(PP(gJE5dPr$)CLhUyV%Z`%!@uhFs z@h8N@zSr6+g5ny02KOrne!d$nI$Nx4nnRm`d;U+D<;DkLF_k+yUKU3O2gzP89N^wm z#tuehj?KQ~tCL-EhPclm3$kPGRq#c=uckV)=|njenkA&-0&#BFQS|<7{nIUU|Ck@{ z(<8nuvBha58<)8fZr6Xxi!S3}EySA!F)byYA?>voPfpr1qj~Q+SH0iED$Nss^aEil zhdz@R2DxEtsQ-A2QG6%0Q^VJmR!iFA$P?3{KZrQZNbue-7K^Vy3H>XBli{?PQp_%~ zoDy`?a7SoM)>e`L*&T)$k-lKm$6CjuF(9}8#QFI@XHU*Ix zO}Vkv>VU(FuepTEUs`un^vI^F;xKJlQoEY(Vp`wBE*D3H2y+5y;;`Is?Hb>ci%YHy zuJWvk$IDYXK|^|lV_CB9t#?&K&90bSea6{}Rs?z7!Gyv*y>~f7N*NaSeG{YD19feN zPW#KsKIxrbY9z#m=HbY9h#@9!J&Fs&rPRjB-~y_pw_`E_LJ`-rK3u&7N4xAlPkSuv z!Ggg5ZpSxKxTzhu&jhHX4h+@O)8dxOk>4eQ%vj0_lQWHSV>z>2PGfiS>B 
zTFI(*GV{ZwzY5Kaw)ZJD4`fIzoc}GjQGdF>nZ?Ft&fcGM1N>ceolz)|W^}qsf8txc zRc1vjfP7-gBvqOb41yYVH!(8sYpopS6rIE-*RQwI%M#U9+)b;M%L`3Rb&q{8m%K8n z%KIR<*87{#ba&<=vBNWD1q-LfgX$syjBt{iDH7A=d?a|AO#o5jBMh0KH;7DBl@b+< zb~#$V`yeb-6wSa&pw$ye&7H}ZLo(AXA;1dU6l4?rwERj_oW%k2RjSm!YwCFj-ae~A z(WHXCfMr3UgqR{*O0r(kjNcrt;X1Rxq$4L+%a}&;=&5iN{uVP}F>>Q<{C#m1M*pGi2*o_Hv@kA%Kd>lAQKXaw<$(#->8tU%0+Jp(i&{->1P zm>N}+1!n>c{?Kw25eTpi==>Ga(qz-JRP#*0GB+1ux=3G2jfRSB0uyq7(cT73D+zE?(rnGey8G@%UOARrPv+~J?iCnjHQA^dZ==||&gbmKu_~_f4}*Z}Y_|OrC0V=xX4m&-XxlAh4V^7`llR6< zBHLXUMvW^`s{q;sS*%p&G&PybL*}y(8{?B1kC5Qznaj^2UP4?{mCb*e@knzaVx{Yx zlZ&;wGa(oMOMM~ejLlfhJNC_k?8wL=pmD`&Jzl;HD!$z=ES|>jBw>!eEHuv;JuO<( zE2%Phn^xAQ&V}y8j=M6HM99~RsL~o4J{C90EB;t^`meQu0 zTH5T*vP*|kddIfY-|uhOX9-a9SY;F$%sWE$;AyrvnahLp(`um&jtt?PSCzp|S+KsK z$T&7Nwq)Fn*e$-M4qX^M+j<<9C%k3fgVVbB71CXEut+wG!oQV5v7eeDrazshQk