From 4011dfb4ece2db899d331d48c67d7a77e7db46cc Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 14 Nov 2023 01:07:34 +0000 Subject: [PATCH] Support Hygon CSV/CSV2 live migration, CSV2 reboot When migrating a CSV/CSV2 guest, the sender needs to retrieve the cert chains of the target host (i.e. the receiver); these cert chains will help to negotiate shared keys between the sender and the target host. The sender issues the SEND_START, SEND_UPDATE_DATA, SEND_FINISH APIs to migrate the encrypted pages to the target host, and the receiver issues RECEIVE_START, RECEIVE_UPDATE_DATA, RECEIVE_FINISH to restore the encrypted pages. In addition, the GHCB states and VMSAs also need to be migrated to the target host, so we introduce interfaces to set/get/migrate GHCB states and make use of the SEND_UPDATE_VMSA, RECEIVE_UPDATE_VMSA APIs to migrate VMSAs. When launching a CSV2 guest, the vCPU register states will be encrypted and cannot be changed by the host. In order to support rebooting a CSV2 guest, we introduce kvm ioctl interfaces to notify KVM to reset the vCPU states to their initial encrypted states. 
Signed-off-by: hanliyang --- ...D-SEV-to-include-Live-migration-flow.patch | 69 +++ ...dd-AMD-SEV-specific-migration-parame.patch | 243 ++++++++++ ...st-support-introduce-ConfidentialGue.patch | 67 +++ ...provide-callback-to-setup-outgoing-c.patch | 135 ++++++ ...do-not-create-launch-context-for-an-.patch | 48 ++ ...add-support-to-encrypt-the-outgoing-.patch | 324 ++++++++++++++ ...add-support-to-load-incoming-encrypt.patch | 224 ++++++++++ ...for-SEV-shared-regions-list-and-KVM_.patch | 293 ++++++++++++ ...pport-to-migrate-shared-regions-list.patch | 103 +++++ ...-add-support-to-send-encrypted-pages.patch | 338 ++++++++++++++ ...rce-encrypted-status-for-flash0-flas.patch | 44 ++ ...V-live-migration-bump-downtime-limit.patch | 64 +++ ...for-userspace-MSR-filtering-and-hand.patch | 109 +++++ ...-ram-Force-encrypted-status-for-VGA-.patch | 33 ++ ...86-sev-Clear-shared_regions_list-whe.patch | 57 +++ ...-ram-Fix-calculation-of-gfn-correpon.patch | 57 +++ ...86-csv-Move-is_hygon_cpu-to-header-f.patch | 85 ++++ ...86-csv-Read-cert-chain-from-file-whe.patch | 131 ++++++ ...86-csv-add-support-to-queue-the-outg.patch | 270 ++++++++++++ ...86-csv-add-support-to-encrypt-the-ou.patch | 204 +++++++++ ...86-csv-add-support-to-queue-the-inco.patch | 171 +++++++ ...86-csv-add-support-to-load-incoming-.patch | 108 +++++ ...-ram-Accelerate-the-transmission-of-.patch | 229 ++++++++++ ...-ram-Accelerate-the-loading-of-CSV-g.patch | 37 ++ ...86-csv-Add-support-for-migrate-VMSA-.patch | 416 ++++++++++++++++++ ...rget-i386-get-set-migrate-GHCB-state.patch | 175 ++++++++ ...86-kvm-Return-resettable-when-emulat.patch | 39 ++ ...olis-kvm-Add-support-for-CSV2-reboot.patch | 170 +++++++ qemu.spec | 61 ++- 29 files changed, 4303 insertions(+), 1 deletion(-) create mode 100644 0010-doc-update-AMD-SEV-to-include-Live-migration-flow.patch create mode 100644 0011-migration.json-add-AMD-SEV-specific-migration-parame.patch create mode 100644 
0012-confidential-guest-support-introduce-ConfidentialGue.patch create mode 100644 0013-target-i386-sev-provide-callback-to-setup-outgoing-c.patch create mode 100644 0014-target-i386-sev-do-not-create-launch-context-for-an-.patch create mode 100644 0015-target-i386-sev-add-support-to-encrypt-the-outgoing-.patch create mode 100644 0016-target-i386-sev-add-support-to-load-incoming-encrypt.patch create mode 100644 0017-kvm-Add-support-for-SEV-shared-regions-list-and-KVM_.patch create mode 100644 0018-migration-add-support-to-migrate-shared-regions-list.patch create mode 100644 0019-migration-ram-add-support-to-send-encrypted-pages.patch create mode 100644 0020-migration-ram-Force-encrypted-status-for-flash0-flas.patch create mode 100644 0021-migration-for-SEV-live-migration-bump-downtime-limit.patch create mode 100644 0022-kvm-Add-support-for-userspace-MSR-filtering-and-hand.patch create mode 100644 0023-anolis-migration-ram-Force-encrypted-status-for-VGA-.patch create mode 100644 0024-anolis-target-i386-sev-Clear-shared_regions_list-whe.patch create mode 100644 0025-anolis-migration-ram-Fix-calculation-of-gfn-correpon.patch create mode 100644 0026-anolis-target-i386-csv-Move-is_hygon_cpu-to-header-f.patch create mode 100644 0027-anolis-target-i386-csv-Read-cert-chain-from-file-whe.patch create mode 100644 0028-anolis-target-i386-csv-add-support-to-queue-the-outg.patch create mode 100644 0029-anolis-target-i386-csv-add-support-to-encrypt-the-ou.patch create mode 100644 0030-anolis-target-i386-csv-add-support-to-queue-the-inco.patch create mode 100644 0031-anolis-target-i386-csv-add-support-to-load-incoming-.patch create mode 100644 0032-anolis-migration-ram-Accelerate-the-transmission-of-.patch create mode 100644 0033-anolis-migration-ram-Accelerate-the-loading-of-CSV-g.patch create mode 100644 0034-anolis-target-i386-csv-Add-support-for-migrate-VMSA-.patch create mode 100644 0035-anolis-target-i386-get-set-migrate-GHCB-state.patch create mode 100644 
0036-anolis-target-i386-kvm-Return-resettable-when-emulat.patch create mode 100644 0037-anolis-kvm-Add-support-for-CSV2-reboot.patch diff --git a/0010-doc-update-AMD-SEV-to-include-Live-migration-flow.patch b/0010-doc-update-AMD-SEV-to-include-Live-migration-flow.patch new file mode 100644 index 0000000..c942b95 --- /dev/null +++ b/0010-doc-update-AMD-SEV-to-include-Live-migration-flow.patch @@ -0,0 +1,69 @@ +From 10a13faeeefb654c5f0523bc8eaa317d80cf1fa7 Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Thu, 7 May 2020 22:26:17 +0000 +Subject: [PATCH 01/28] doc: update AMD SEV to include Live migration flow + +cherry-picked from https://github.com/AMDESE/qemu/commit/0e2b3d80e3. + +Reviewed-by: Dr. David Alan Gilbert +Signed-off-by: Brijesh Singh +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + docs/system/i386/amd-memory-encryption.rst | 40 +++++++++++++++++++++- + 1 file changed, 39 insertions(+), 1 deletion(-) + +diff --git a/docs/system/i386/amd-memory-encryption.rst b/docs/system/i386/amd-memory-encryption.rst +index dcf4add0e..2460f1488 100644 +--- a/docs/system/i386/amd-memory-encryption.rst ++++ b/docs/system/i386/amd-memory-encryption.rst +@@ -177,7 +177,45 @@ TODO + Live Migration + --------------- + +-TODO ++AMD SEV encrypts the memory of VMs and because a different key is used ++in each VM, the hypervisor will be unable to simply copy the ++ciphertext from one VM to another to migrate the VM. Instead the AMD SEV Key ++Management API provides sets of function which the hypervisor can use ++to package a guest page for migration, while maintaining the confidentiality ++provided by AMD SEV. ++ ++SEV guest VMs have the concept of private and shared memory. The private ++memory is encrypted with the guest-specific key, while shared memory may ++be encrypted with the hypervisor key. The migration APIs provided by the ++SEV API spec should be used for migrating the private pages. 
The ++KVM_GET_PAGE_ENC_BITMAP ioctl can be used to get the guest page encryption ++bitmap. The bitmap can be used to check if the given guest page is ++private or shared. ++ ++Before initiating the migration, we need to know the targets machine's public ++Diffie-Hellman key (PDH) and certificate chain. It can be retrieved ++with the 'query-sev-capabilities' QMP command or using the sev-tool. The ++migrate-set-parameter can be used to pass the target machine's PDH and ++certificate chain. ++ ++During the migration flow, the SEND_START is called on the source hypervisor ++to create an outgoing encryption context. The SEV guest policy dictates whether ++the certificate passed through the migrate-sev-set-info command will be ++validated. SEND_UPDATE_DATA is called to encrypt the guest private pages. ++After migration is completed, SEND_FINISH is called to destroy the encryption ++context and make the VM non-runnable to protect it against cloning. ++ ++On the target machine, RECEIVE_START is called first to create an ++incoming encryption context. The RECEIVE_UPDATE_DATA is called to copy ++the received encrypted page into guest memory. After migration has ++completed, RECEIVE_FINISH is called to make the VM runnable. ++ ++For more information about the migration see SEV API Appendix A ++Usage flow (Live migration section). ++ ++NOTE: ++To protect against the memory clone SEV APIs are designed to make the VM ++unrunnable in case of the migration failure. 
+ + References + ---------- +-- +2.31.1 + diff --git a/0011-migration.json-add-AMD-SEV-specific-migration-parame.patch b/0011-migration.json-add-AMD-SEV-specific-migration-parame.patch new file mode 100644 index 0000000..40b8bdf --- /dev/null +++ b/0011-migration.json-add-AMD-SEV-specific-migration-parame.patch @@ -0,0 +1,243 @@ +From 5f398e56319d46d9f3bcd5bd367e1355a15910a5 Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 11:27:00 +0000 +Subject: [PATCH 02/28] migration.json: add AMD SEV specific migration + parameters + +cherry-picked from https://github.com/AMDESE/qemu/commit/d6a23bde6b6e. + +AMD SEV migration flow requires that target machine's public Diffie-Hellman +key (PDH) and certificate chain must be passed before initiating the guest +migration. User can use QMP 'migrate-set-parameters' to pass the certificate +chain. The certificate chain will be used while creating the outgoing +encryption context. + +Signed-off-by: Brijesh Singh +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + migration/migration.c | 63 +++++++++++++++++++++++++++++++++++++++++++ + monitor/hmp-cmds.c | 18 +++++++++++++ + qapi/migration.json | 40 ++++++++++++++++++++++++--- + 3 files changed, 118 insertions(+), 3 deletions(-) + +diff --git a/migration/migration.c b/migration/migration.c +index c19fb5cb3..0e8bb4837 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -955,6 +955,12 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp) + params->announce_rounds = s->parameters.announce_rounds; + params->has_announce_step = true; + params->announce_step = s->parameters.announce_step; ++ params->has_sev_pdh = true; ++ params->sev_pdh = g_strdup(s->parameters.sev_pdh); ++ params->has_sev_plat_cert = true; ++ params->sev_plat_cert = g_strdup(s->parameters.sev_plat_cert); ++ params->has_sev_amd_cert = true; ++ params->sev_amd_cert = g_strdup(s->parameters.sev_amd_cert); + + if (s->parameters.has_block_bitmap_mapping) { + 
params->has_block_bitmap_mapping = true; +@@ -1681,6 +1687,19 @@ static void migrate_params_test_apply(MigrateSetParameters *params, + dest->has_block_bitmap_mapping = true; + dest->block_bitmap_mapping = params->block_bitmap_mapping; + } ++ ++ if (params->has_sev_pdh) { ++ assert(params->sev_pdh->type == QTYPE_QSTRING); ++ dest->sev_pdh = g_strdup(params->sev_pdh->u.s); ++ } ++ if (params->has_sev_plat_cert) { ++ assert(params->sev_plat_cert->type == QTYPE_QSTRING); ++ dest->sev_plat_cert = g_strdup(params->sev_plat_cert->u.s); ++ } ++ if (params->has_sev_amd_cert) { ++ assert(params->sev_amd_cert->type == QTYPE_QSTRING); ++ dest->sev_amd_cert = g_strdup(params->sev_amd_cert->u.s); ++ } + } + + static void migrate_params_apply(MigrateSetParameters *params, Error **errp) +@@ -1803,6 +1822,22 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp) + QAPI_CLONE(BitmapMigrationNodeAliasList, + params->block_bitmap_mapping); + } ++ ++ if (params->has_sev_pdh) { ++ g_free(s->parameters.sev_pdh); ++ assert(params->sev_pdh->type == QTYPE_QSTRING); ++ s->parameters.sev_pdh = g_strdup(params->sev_pdh->u.s); ++ } ++ if (params->has_sev_plat_cert) { ++ g_free(s->parameters.sev_plat_cert); ++ assert(params->sev_plat_cert->type == QTYPE_QSTRING); ++ s->parameters.sev_plat_cert = g_strdup(params->sev_plat_cert->u.s); ++ } ++ if (params->has_sev_amd_cert) { ++ g_free(s->parameters.sev_amd_cert); ++ assert(params->sev_amd_cert->type == QTYPE_QSTRING); ++ s->parameters.sev_amd_cert = g_strdup(params->sev_amd_cert->u.s); ++ } + } + + void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) +@@ -1823,6 +1858,27 @@ void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) + params->tls_hostname->type = QTYPE_QSTRING; + params->tls_hostname->u.s = strdup(""); + } ++ /* TODO Rewrite "" to null instead */ ++ if (params->has_sev_pdh ++ && params->sev_pdh->type == QTYPE_QNULL) { ++ qobject_unref(params->sev_pdh->u.n); ++ 
params->sev_pdh->type = QTYPE_QSTRING; ++ params->sev_pdh->u.s = strdup(""); ++ } ++ /* TODO Rewrite "" to null instead */ ++ if (params->has_sev_plat_cert ++ && params->sev_plat_cert->type == QTYPE_QNULL) { ++ qobject_unref(params->sev_plat_cert->u.n); ++ params->sev_plat_cert->type = QTYPE_QSTRING; ++ params->sev_plat_cert->u.s = strdup(""); ++ } ++ /* TODO Rewrite "" to null instead */ ++ if (params->has_sev_amd_cert ++ && params->sev_amd_cert->type == QTYPE_QNULL) { ++ qobject_unref(params->sev_amd_cert->u.n); ++ params->sev_amd_cert->type = QTYPE_QSTRING; ++ params->sev_amd_cert->u.s = strdup(""); ++ } + + migrate_params_test_apply(params, &tmp); + +@@ -4498,6 +4554,13 @@ static void migration_instance_init(Object *obj) + params->has_tls_hostname = true; + params->has_tls_authz = true; + ++ params->sev_pdh = g_strdup(""); ++ params->sev_plat_cert = g_strdup(""); ++ params->sev_amd_cert = g_strdup(""); ++ params->has_sev_pdh = true; ++ params->has_sev_plat_cert = true; ++ params->has_sev_amd_cert = true; ++ + qemu_sem_init(&ms->postcopy_pause_sem, 0); + qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); + qemu_sem_init(&ms->rp_state.rp_sem, 0); +diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c +index 01b789a79..1106fbb89 100644 +--- a/monitor/hmp-cmds.c ++++ b/monitor/hmp-cmds.c +@@ -1332,6 +1332,24 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) + error_setg(&err, "The block-bitmap-mapping parameter can only be set " + "through QMP"); + break; ++ case MIGRATION_PARAMETER_SEV_PDH: ++ p->has_sev_pdh = true; ++ p->sev_pdh = g_new0(StrOrNull, 1); ++ p->sev_pdh->type = QTYPE_QSTRING; ++ visit_type_str(v, param, &p->sev_pdh->u.s, &err); ++ break; ++ case MIGRATION_PARAMETER_SEV_PLAT_CERT: ++ p->has_sev_plat_cert = true; ++ p->sev_plat_cert = g_new0(StrOrNull, 1); ++ p->sev_plat_cert->type = QTYPE_QSTRING; ++ visit_type_str(v, param, &p->sev_plat_cert->u.s, &err); ++ break; ++ case MIGRATION_PARAMETER_SEV_AMD_CERT: ++ p->has_sev_amd_cert = true; ++ 
p->sev_amd_cert = g_new0(StrOrNull, 1); ++ p->sev_amd_cert->type = QTYPE_QSTRING; ++ visit_type_str(v, param, &p->sev_amd_cert->u.s, &err); ++ break; + default: + assert(0); + } +diff --git a/qapi/migration.json b/qapi/migration.json +index 88ecf86ac..4300a578f 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -776,6 +776,15 @@ + # block device name if there is one, and to their node name + # otherwise. (Since 5.2) + # ++# @sev-pdh: The target host platform diffie-hellman key encoded in base64 ++# (Since 4.2) ++# ++# @sev-plat-cert: The target host platform certificate chain encoded in base64 ++# (Since 4.2) ++# ++# @sev-amd-cert: AMD certificate chain which include ASK and OCA encoded in ++# base64 (Since 4.2) ++# + # Features: + # @unstable: Member @x-checkpoint-delay is experimental. + # +@@ -796,7 +805,8 @@ + 'xbzrle-cache-size', 'max-postcopy-bandwidth', + 'max-cpu-throttle', 'multifd-compression', + 'multifd-zlib-level' ,'multifd-zstd-level', +- 'block-bitmap-mapping' ] } ++ 'block-bitmap-mapping', ++ 'sev-pdh', 'sev-plat-cert', 'sev-amd-cert' ] } + + ## + # @MigrateSetParameters: +@@ -941,6 +951,15 @@ + # block device name if there is one, and to their node name + # otherwise. (Since 5.2) + # ++# @sev-pdh: The target host platform diffie-hellman key encoded in base64 ++# (Since 4.2) ++# ++# @sev-plat-cert: The target host platform certificate chain encoded in base64 ++# (Since 4.2) ++# ++# @sev-amd-cert: AMD certificate chain which include ASK and OCA encoded in ++# base64 (Since 4.2) ++# + # Features: + # @unstable: Member @x-checkpoint-delay is experimental. 
+ # +@@ -976,7 +995,10 @@ + '*multifd-compression': 'MultiFDCompression', + '*multifd-zlib-level': 'uint8', + '*multifd-zstd-level': 'uint8', +- '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } } ++ '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], ++ '*sev-pdh':'StrOrNull', ++ '*sev-plat-cert': 'StrOrNull', ++ '*sev-amd-cert' : 'StrOrNull' } } + + ## + # @migrate-set-parameters: +@@ -1141,6 +1163,15 @@ + # block device name if there is one, and to their node name + # otherwise. (Since 5.2) + # ++# @sev-pdh: The target host platform diffie-hellman key encoded in base64 ++# (Since 4.2) ++# ++# @sev-plat-cert: The target host platform certificate chain encoded in base64 ++# (Since 4.2) ++# ++# @sev-amd-cert: AMD certificate chain which include ASK and OCA encoded in ++# base64 (Since 4.2) ++# + # Features: + # @unstable: Member @x-checkpoint-delay is experimental. + # +@@ -1174,7 +1205,10 @@ + '*multifd-compression': 'MultiFDCompression', + '*multifd-zlib-level': 'uint8', + '*multifd-zstd-level': 'uint8', +- '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } } ++ '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], ++ '*sev-pdh':'str', ++ '*sev-plat-cert': 'str', ++ '*sev-amd-cert' : 'str'} } + + ## + # @query-migrate-parameters: +-- +2.31.1 + diff --git a/0012-confidential-guest-support-introduce-ConfidentialGue.patch b/0012-confidential-guest-support-introduce-ConfidentialGue.patch new file mode 100644 index 0000000..6a16053 --- /dev/null +++ b/0012-confidential-guest-support-introduce-ConfidentialGue.patch @@ -0,0 +1,67 @@ +From 46f8f72652049e89b7739b3f114e8cb6463f8a27 Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 11:41:37 +0000 +Subject: [PATCH 03/28] confidential guest support: introduce + ConfidentialGuestMemoryEncryptionOps for encrypted VMs + +cherry-picked from https://github.com/AMDESE/qemu/commit/74fce7be9bd. 
+ +When memory encryption is enabled in VM, the guest RAM will be encrypted +with the guest-specific key, to protect the confidentiality of data while +in transit we need to platform specific hooks to save or migrate the +guest RAM. + +Introduce the new ConfidentialGuestMemoryEncryptionOps in this patch +which will be later used by the encrypted guest for migration. + +Signed-off-by: Brijesh Singh +Co-developed-by: Ashish Kalra +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + include/exec/confidential-guest-support.h | 27 +++++++++++++++++++++++ + 1 file changed, 27 insertions(+) + +diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h +index ba2dd4b5d..343f686fc 100644 +--- a/include/exec/confidential-guest-support.h ++++ b/include/exec/confidential-guest-support.h +@@ -53,8 +53,35 @@ struct ConfidentialGuestSupport { + bool ready; + }; + ++/** ++ * The functions registers with ConfidentialGuestMemoryEncryptionOps will be ++ * used during the encrypted guest migration. 
++ */ ++struct ConfidentialGuestMemoryEncryptionOps { ++ /* Initialize the platform specific state before starting the migration */ ++ int (*save_setup)(const char *pdh, const char *plat_cert, ++ const char *amd_cert); ++ ++ /* Write the encrypted page and metadata associated with it */ ++ int (*save_outgoing_page)(QEMUFile *f, uint8_t *ptr, uint32_t size, ++ uint64_t *bytes_sent); ++ ++ /* Load the incoming encrypted page into guest memory */ ++ int (*load_incoming_page)(QEMUFile *f, uint8_t *ptr); ++ ++ /* Check if gfn is in shared/unencrypted region */ ++ bool (*is_gfn_in_unshared_region)(unsigned long gfn); ++ ++ /* Write the shared regions list */ ++ int (*save_outgoing_shared_regions_list)(QEMUFile *f); ++ ++ /* Load the shared regions list */ ++ int (*load_incoming_shared_regions_list)(QEMUFile *f); ++}; ++ + typedef struct ConfidentialGuestSupportClass { + ObjectClass parent; ++ struct ConfidentialGuestMemoryEncryptionOps *memory_encryption_ops; + } ConfidentialGuestSupportClass; + + #endif /* !CONFIG_USER_ONLY */ +-- +2.31.1 + diff --git a/0013-target-i386-sev-provide-callback-to-setup-outgoing-c.patch b/0013-target-i386-sev-provide-callback-to-setup-outgoing-c.patch new file mode 100644 index 0000000..d857682 --- /dev/null +++ b/0013-target-i386-sev-provide-callback-to-setup-outgoing-c.patch @@ -0,0 +1,135 @@ +From 0eb80743631c3dc13304585e4ebdc1977a14291f Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 12:10:23 +0000 +Subject: [PATCH 04/28] target/i386: sev: provide callback to setup outgoing + context + +cherry-picked from https://github.com/AMDESE/qemu/commit/7521883afc0. + +The user provides the target machine's Platform Diffie-Hellman key (PDH) +and certificate chain before starting the SEV guest migration. Cache the +certificate chain as we need them while creating the outgoing context. 
+ +Signed-off-by: Brijesh Singh +Co-developed-by: Ashish Kalra +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + target/i386/sev.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++ + target/i386/sev.h | 2 ++ + 2 files changed, 61 insertions(+) + +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 6a672c439..5a574c636 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -76,6 +76,12 @@ struct SevGuestState { + int sev_fd; + SevState state; + gchar *measurement; ++ guchar *remote_pdh; ++ size_t remote_pdh_len; ++ guchar *remote_plat_cert; ++ size_t remote_plat_cert_len; ++ guchar *amd_cert; ++ size_t amd_cert_len; + + uint32_t reset_cs; + uint32_t reset_ip; +@@ -160,6 +166,12 @@ static const char *const sev_fw_errlist[] = { + + #define SEV_FW_MAX_ERROR ARRAY_SIZE(sev_fw_errlist) + ++#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ ++ ++static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { ++ .save_setup = sev_save_setup, ++}; ++ + static int + sev_ioctl(int fd, int cmd, void *data, int *error) + { +@@ -912,6 +924,48 @@ sev_vm_state_change(void *opaque, bool running, RunState state) + } + } + ++static inline bool check_blob_length(size_t value) ++{ ++ if (value > SEV_FW_BLOB_MAX_SIZE) { ++ error_report("invalid length max=%d got=%ld", ++ SEV_FW_BLOB_MAX_SIZE, value); ++ return false; ++ } ++ ++ return true; ++} ++ ++int sev_save_setup(const char *pdh, const char *plat_cert, ++ const char *amd_cert) ++{ ++ SevGuestState *s = sev_guest; ++ ++ s->remote_pdh = g_base64_decode(pdh, &s->remote_pdh_len); ++ if (!check_blob_length(s->remote_pdh_len)) { ++ goto error; ++ } ++ ++ s->remote_plat_cert = g_base64_decode(plat_cert, ++ &s->remote_plat_cert_len); ++ if (!check_blob_length(s->remote_plat_cert_len)) { ++ goto error; ++ } ++ ++ s->amd_cert = g_base64_decode(amd_cert, &s->amd_cert_len); ++ if (!check_blob_length(s->amd_cert_len)) { ++ goto error; ++ } ++ ++ return 0; ++ ++error: ++ g_free(s->remote_pdh); ++ 
g_free(s->remote_plat_cert); ++ g_free(s->amd_cert); ++ ++ return 1; ++} ++ + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + { + SevGuestState *sev +@@ -926,6 +980,9 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + return 0; + } + ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *) object_get_class(OBJECT(cgs)); ++ + ret = ram_block_discard_disable(true); + if (ret) { + error_report("%s: cannot disable RAM discard", __func__); +@@ -1020,6 +1077,8 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + qemu_add_machine_init_done_notifier(&sev_machine_done_notify); + qemu_add_vm_change_state_handler(sev_vm_state_change, sev); + ++ cgs_class->memory_encryption_ops = &sev_memory_encryption_ops; ++ + cgs->ready = true; + + return 0; +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 0a82f97f3..4edbad04e 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -51,6 +51,8 @@ extern uint32_t sev_get_reduced_phys_bits(void); + extern bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp); + + int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp); ++int sev_save_setup(const char *pdh, const char *plat_cert, ++ const char *amd_cert); + int sev_inject_launch_secret(const char *hdr, const char *secret, + uint64_t gpa, Error **errp); + +-- +2.31.1 + diff --git a/0014-target-i386-sev-do-not-create-launch-context-for-an-.patch b/0014-target-i386-sev-do-not-create-launch-context-for-an-.patch new file mode 100644 index 0000000..536a15b --- /dev/null +++ b/0014-target-i386-sev-do-not-create-launch-context-for-an-.patch @@ -0,0 +1,48 @@ +From de628e55a57024aabb5e78af457c2ee0e1de3012 Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 12:16:09 +0000 +Subject: [PATCH 05/28] target/i386: sev: do not create launch context for an + incoming guest + +cherry-picked from https://github.com/AMDESE/qemu/commit/b85694233495. 
+ +The LAUNCH_START is used for creating an encryption context to encrypt +newly created guest, for an incoming guest the RECEIVE_START should be +used. + +Reviewed-by: Dr. David Alan Gilbert +Signed-off-by: Brijesh Singh +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + target/i386/sev.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 5a574c636..be3d86805 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -1064,10 +1064,16 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + } + } + +- ret = sev_launch_start(sev); +- if (ret) { +- error_setg(errp, "%s: failed to create encryption context", __func__); +- goto err; ++ /* ++ * The LAUNCH context is used for new guest, if its an incoming guest ++ * then RECEIVE context will be created after the connection is established. ++ */ ++ if (!runstate_check(RUN_STATE_INMIGRATE)) { ++ ret = sev_launch_start(sev); ++ if (ret) { ++ error_setg(errp, "%s: failed to create encryption context", __func__); ++ goto err; ++ } + } + + /* CSV guest needs no notifier to reg/unreg memory */ +-- +2.31.1 + diff --git a/0015-target-i386-sev-add-support-to-encrypt-the-outgoing-.patch b/0015-target-i386-sev-add-support-to-encrypt-the-outgoing-.patch new file mode 100644 index 0000000..89e67aa --- /dev/null +++ b/0015-target-i386-sev-add-support-to-encrypt-the-outgoing-.patch @@ -0,0 +1,324 @@ +From 69450af0f725c369073054dd71e09e161e517f6a Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 12:55:25 +0000 +Subject: [PATCH 06/28] target/i386: sev: add support to encrypt the outgoing + page + +cherry-picked from https://github.com/AMDESE/qemu/commit/5187c6f86bd. + +The sev_save_outgoing_page() provide the implementation to encrypt the +guest private pages during the transit. 
The routines uses the SEND_START +command to create the outgoing encryption context on the first call then +uses the SEND_UPDATE_DATA command to encrypt the data before writing it +to the socket. While encrypting the data SEND_UPDATE_DATA produces some +metadata (e.g MAC, IV). The metadata is also sent to the target machine. +After migration is completed, we issue the SEND_FINISH command to transition +the SEV guest state from sending to unrunnable state. + +Signed-off-by: Brijesh Singh +Co-developed-by: Ashish Kalra +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + target/i386/sev.c | 221 +++++++++++++++++++++++++++++++++++++++ + target/i386/sev.h | 2 + + target/i386/trace-events | 3 + + 3 files changed, 226 insertions(+) + +diff --git a/target/i386/sev.c b/target/i386/sev.c +index be3d86805..27160c396 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -31,6 +31,8 @@ + #include "sysemu/runstate.h" + #include "trace.h" + #include "migration/blocker.h" ++#include "migration/qemu-file.h" ++#include "migration/misc.h" + #include "qom/object.h" + #include "monitor/monitor.h" + #include "monitor/hmp-target.h" +@@ -82,6 +84,8 @@ struct SevGuestState { + size_t remote_plat_cert_len; + guchar *amd_cert; + size_t amd_cert_len; ++ gchar *send_packet_hdr; ++ size_t send_packet_hdr_len; + + uint32_t reset_cs; + uint32_t reset_ip; +@@ -170,6 +174,7 @@ static const char *const sev_fw_errlist[] = { + + static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .save_setup = sev_save_setup, ++ .save_outgoing_page = sev_save_outgoing_page, + }; + + static int +@@ -966,6 +971,40 @@ error: + return 1; + } + ++static void ++sev_send_finish(void) ++{ ++ int ret, error; ++ ++ trace_kvm_sev_send_finish(); ++ ret = sev_ioctl(sev_guest->sev_fd, KVM_SEV_SEND_FINISH, 0, &error); ++ if (ret) { ++ error_report("%s: SEND_FINISH ret=%d fw_error=%d '%s'", ++ __func__, ret, error, fw_error_to_str(error)); ++ } ++ ++ g_free(sev_guest->send_packet_hdr); 
++ sev_set_guest_state(sev_guest, SEV_STATE_RUNNING); ++} ++ ++static void ++sev_migration_state_notifier(Notifier *notifier, void *data) ++{ ++ MigrationState *s = data; ++ ++ if (migration_has_finished(s) || ++ migration_in_postcopy_after_devices(s) || ++ migration_has_failed(s)) { ++ if (sev_check_state(sev_guest, SEV_STATE_SEND_UPDATE)) { ++ sev_send_finish(); ++ } ++ } ++} ++ ++static Notifier sev_migration_state_notify = { ++ .notify = sev_migration_state_notifier, ++}; ++ + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + { + SevGuestState *sev +@@ -1082,6 +1121,7 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + } + qemu_add_machine_init_done_notifier(&sev_machine_done_notify); + qemu_add_vm_change_state_handler(sev_vm_state_change, sev); ++ add_migration_state_change_notifier(&sev_migration_state_notify); + + cgs_class->memory_encryption_ops = &sev_memory_encryption_ops; + +@@ -1323,6 +1363,187 @@ int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) + return 0; + } + ++static int ++sev_get_send_session_length(void) ++{ ++ int ret, fw_err = 0; ++ struct kvm_sev_send_start start = {}; ++ ++ ret = sev_ioctl(sev_guest->sev_fd, KVM_SEV_SEND_START, &start, &fw_err); ++ if (fw_err != SEV_RET_INVALID_LEN) { ++ ret = -1; ++ error_report("%s: failed to get session length ret=%d fw_error=%d '%s'", ++ __func__, ret, fw_err, fw_error_to_str(fw_err)); ++ goto err; ++ } ++ ++ ret = start.session_len; ++err: ++ return ret; ++} ++ ++static int ++sev_send_start(SevGuestState *s, QEMUFile *f, uint64_t *bytes_sent) ++{ ++ gsize pdh_len = 0, plat_cert_len; ++ int session_len, ret, fw_error; ++ struct kvm_sev_send_start start = { }; ++ guchar *pdh = NULL, *plat_cert = NULL, *session = NULL; ++ Error *local_err = NULL; ++ ++ if (!s->remote_pdh || !s->remote_plat_cert || !s->amd_cert_len) { ++ error_report("%s: missing remote PDH or PLAT_CERT", __func__); ++ return 1; ++ } ++ ++ start.pdh_cert_uaddr = (uintptr_t) s->remote_pdh; ++ 
start.pdh_cert_len = s->remote_pdh_len; ++ ++ start.plat_certs_uaddr = (uintptr_t)s->remote_plat_cert; ++ start.plat_certs_len = s->remote_plat_cert_len; ++ ++ start.amd_certs_uaddr = (uintptr_t)s->amd_cert; ++ start.amd_certs_len = s->amd_cert_len; ++ ++ /* get the session length */ ++ session_len = sev_get_send_session_length(); ++ if (session_len < 0) { ++ ret = 1; ++ goto err; ++ } ++ ++ session = g_new0(guchar, session_len); ++ start.session_uaddr = (unsigned long)session; ++ start.session_len = session_len; ++ ++ /* Get our PDH certificate */ ++ ret = sev_get_pdh_info(s->sev_fd, &pdh, &pdh_len, ++ &plat_cert, &plat_cert_len, &local_err); ++ if (ret) { ++ error_report("Failed to get our PDH cert"); ++ goto err; ++ } ++ ++ trace_kvm_sev_send_start(start.pdh_cert_uaddr, start.pdh_cert_len, ++ start.plat_certs_uaddr, start.plat_certs_len, ++ start.amd_certs_uaddr, start.amd_certs_len); ++ ++ ret = sev_ioctl(s->sev_fd, KVM_SEV_SEND_START, &start, &fw_error); ++ if (ret < 0) { ++ error_report("%s: SEND_START ret=%d fw_error=%d '%s'", ++ __func__, ret, fw_error, fw_error_to_str(fw_error)); ++ goto err; ++ } ++ ++ qemu_put_be32(f, start.policy); ++ qemu_put_be32(f, pdh_len); ++ qemu_put_buffer(f, (uint8_t *)pdh, pdh_len); ++ qemu_put_be32(f, start.session_len); ++ qemu_put_buffer(f, (uint8_t *)start.session_uaddr, start.session_len); ++ *bytes_sent = 12 + pdh_len + start.session_len; ++ ++ sev_set_guest_state(s, SEV_STATE_SEND_UPDATE); ++ ++err: ++ g_free(pdh); ++ g_free(plat_cert); ++ return ret; ++} ++ ++static int ++sev_send_get_packet_len(int *fw_err) ++{ ++ int ret; ++ struct kvm_sev_send_update_data update = { 0, }; ++ ++ ret = sev_ioctl(sev_guest->sev_fd, KVM_SEV_SEND_UPDATE_DATA, ++ &update, fw_err); ++ if (*fw_err != SEV_RET_INVALID_LEN) { ++ ret = -1; ++ error_report("%s: failed to get session length ret=%d fw_error=%d '%s'", ++ __func__, ret, *fw_err, fw_error_to_str(*fw_err)); ++ goto err; ++ } ++ ++ ret = update.hdr_len; ++ ++err: ++ return ret; ++} ++ 
++static int ++sev_send_update_data(SevGuestState *s, QEMUFile *f, uint8_t *ptr, uint32_t size, ++ uint64_t *bytes_sent) ++{ ++ int ret, fw_error; ++ guchar *trans; ++ struct kvm_sev_send_update_data update = { }; ++ ++ /* ++ * If this is first call then query the packet header bytes and allocate ++ * the packet buffer. ++ */ ++ if (!s->send_packet_hdr) { ++ s->send_packet_hdr_len = sev_send_get_packet_len(&fw_error); ++ if (s->send_packet_hdr_len < 1) { ++ error_report("%s: SEND_UPDATE fw_error=%d '%s'", ++ __func__, fw_error, fw_error_to_str(fw_error)); ++ return 1; ++ } ++ ++ s->send_packet_hdr = g_new(gchar, s->send_packet_hdr_len); ++ } ++ ++ /* allocate transport buffer */ ++ trans = g_new(guchar, size); ++ ++ update.hdr_uaddr = (uintptr_t)s->send_packet_hdr; ++ update.hdr_len = s->send_packet_hdr_len; ++ update.guest_uaddr = (uintptr_t)ptr; ++ update.guest_len = size; ++ update.trans_uaddr = (uintptr_t)trans; ++ update.trans_len = size; ++ ++ trace_kvm_sev_send_update_data(ptr, trans, size); ++ ++ ret = sev_ioctl(s->sev_fd, KVM_SEV_SEND_UPDATE_DATA, &update, &fw_error); ++ if (ret) { ++ error_report("%s: SEND_UPDATE_DATA ret=%d fw_error=%d '%s'", ++ __func__, ret, fw_error, fw_error_to_str(fw_error)); ++ goto err; ++ } ++ ++ qemu_put_be32(f, update.hdr_len); ++ qemu_put_buffer(f, (uint8_t *)update.hdr_uaddr, update.hdr_len); ++ *bytes_sent = 4 + update.hdr_len; ++ ++ qemu_put_be32(f, update.trans_len); ++ qemu_put_buffer(f, (uint8_t *)update.trans_uaddr, update.trans_len); ++ *bytes_sent += (4 + update.trans_len); ++ ++err: ++ g_free(trans); ++ return ret; ++} ++ ++int sev_save_outgoing_page(QEMUFile *f, uint8_t *ptr, ++ uint32_t sz, uint64_t *bytes_sent) ++{ ++ SevGuestState *s = sev_guest; ++ ++ /* ++ * If this is a first buffer then create outgoing encryption context ++ * and write our PDH, policy and session data. 
++ */ ++ if (!sev_check_state(s, SEV_STATE_SEND_UPDATE) && ++ sev_send_start(s, f, bytes_sent)) { ++ error_report("Failed to create outgoing context"); ++ return 1; ++ } ++ ++ return sev_send_update_data(s, f, ptr, sz, bytes_sent); ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 4edbad04e..90bce02f9 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -53,6 +53,8 @@ extern bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **er + int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp); + int sev_save_setup(const char *pdh, const char *plat_cert, + const char *amd_cert); ++int sev_save_outgoing_page(QEMUFile *f, uint8_t *ptr, ++ uint32_t size, uint64_t *bytes_sent); + int sev_inject_launch_secret(const char *hdr, const char *secret, + uint64_t gpa, Error **errp); + +diff --git a/target/i386/trace-events b/target/i386/trace-events +index b7da9bd74..c165e3737 100644 +--- a/target/i386/trace-events ++++ b/target/i386/trace-events +@@ -11,6 +11,9 @@ kvm_sev_launch_measurement(const char *value) "data %s" + kvm_sev_launch_finish(void) "" + kvm_sev_launch_secret(uint64_t hpa, uint64_t hva, uint64_t secret, int len) "hpa 0x%" PRIx64 " hva 0x%" PRIx64 " data 0x%" PRIx64 " len %d" + kvm_sev_attestation_report(const char *mnonce, const char *data) "mnonce %s data %s" ++kvm_sev_send_start(uint64_t pdh, int l1, uint64_t plat, int l2, uint64_t amd, int l3) "pdh 0x%" PRIx64 " len %d plat 0x%" PRIx64 " len %d amd 0x%" PRIx64 " len %d" ++kvm_sev_send_update_data(void *src, void *dst, int len) "guest %p trans %p len %d" ++kvm_sev_send_finish(void) "" + + # csv.c + kvm_csv_launch_encrypt_data(uint64_t gpa, void *addr, uint64_t len) "gpa 0x%" PRIx64 "addr %p len 0x%" PRIu64 +-- +2.31.1 + diff --git a/0016-target-i386-sev-add-support-to-load-incoming-encrypt.patch 
b/0016-target-i386-sev-add-support-to-load-incoming-encrypt.patch new file mode 100644 index 0000000..82ebf96 --- /dev/null +++ b/0016-target-i386-sev-add-support-to-load-incoming-encrypt.patch @@ -0,0 +1,224 @@ +From 605aeee57cb8f465e6887ba67cd9a457c588a668 Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 13:00:50 +0000 +Subject: [PATCH 07/28] target/i386: sev: add support to load incoming + encrypted page + +cherry-picked from https://github.com/AMDESE/qemu/commit/e86e5dccb045. + +The sev_load_incoming_page() provide the implementation to read the +incoming guest private pages from the socket and load it into the guest +memory. The routines uses the RECEIVE_START command to create the +incoming encryption context on the first call then uses the +RECEIEVE_UPDATE_DATA command to load the encrypted pages into the guest +memory. After migration is completed, we issue the RECEIVE_FINISH command +to transition the SEV guest to the runnable state so that it can be +executed. 
+ +Signed-off-by: Brijesh Singh +Co-developed-by: Ashish Kalra +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + target/i386/sev.c | 137 ++++++++++++++++++++++++++++++++++++++- + target/i386/sev.h | 1 + + target/i386/trace-events | 3 + + 3 files changed, 140 insertions(+), 1 deletion(-) + +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 27160c396..181bbe869 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -175,6 +175,7 @@ static const char *const sev_fw_errlist[] = { + static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .save_setup = sev_save_setup, + .save_outgoing_page = sev_save_outgoing_page, ++ .load_incoming_page = sev_load_incoming_page, + }; + + static int +@@ -917,13 +918,33 @@ sev_launch_finish(SevGuestState *sev) + migrate_add_blocker(sev_mig_blocker, &error_fatal); + } + ++static int ++sev_receive_finish(SevGuestState *s) ++{ ++ int error, ret = 1; ++ ++ trace_kvm_sev_receive_finish(); ++ ret = sev_ioctl(s->sev_fd, KVM_SEV_RECEIVE_FINISH, 0, &error); ++ if (ret) { ++ error_report("%s: RECEIVE_FINISH ret=%d fw_error=%d '%s'", ++ __func__, ret, error, fw_error_to_str(error)); ++ goto err; ++ } ++ ++ sev_set_guest_state(s, SEV_STATE_RUNNING); ++err: ++ return ret; ++} ++ + static void + sev_vm_state_change(void *opaque, bool running, RunState state) + { + SevGuestState *sev = opaque; + + if (running) { +- if (!sev_check_state(sev, SEV_STATE_RUNNING)) { ++ if (sev_check_state(sev, SEV_STATE_RECEIVE_UPDATE)) { ++ sev_receive_finish(sev); ++ } else if (!sev_check_state(sev, SEV_STATE_RUNNING)) { + sev_launch_finish(sev); + } + } +@@ -1544,6 +1565,120 @@ int sev_save_outgoing_page(QEMUFile *f, uint8_t *ptr, + return sev_send_update_data(s, f, ptr, sz, bytes_sent); + } + ++static int ++sev_receive_start(SevGuestState *sev, QEMUFile *f) ++{ ++ int ret = 1; ++ int fw_error; ++ struct kvm_sev_receive_start start = { }; ++ gchar *session = NULL, *pdh_cert = NULL; ++ ++ /* get SEV guest handle */ ++ 
start.handle = object_property_get_int(OBJECT(sev), "handle", ++ &error_abort); ++ ++ /* get the source policy */ ++ start.policy = qemu_get_be32(f); ++ ++ /* get source PDH key */ ++ start.pdh_len = qemu_get_be32(f); ++ if (!check_blob_length(start.pdh_len)) { ++ return 1; ++ } ++ ++ pdh_cert = g_new(gchar, start.pdh_len); ++ qemu_get_buffer(f, (uint8_t *)pdh_cert, start.pdh_len); ++ start.pdh_uaddr = (uintptr_t)pdh_cert; ++ ++ /* get source session data */ ++ start.session_len = qemu_get_be32(f); ++ if (!check_blob_length(start.session_len)) { ++ return 1; ++ } ++ session = g_new(gchar, start.session_len); ++ qemu_get_buffer(f, (uint8_t *)session, start.session_len); ++ start.session_uaddr = (uintptr_t)session; ++ ++ trace_kvm_sev_receive_start(start.policy, session, pdh_cert); ++ ++ ret = sev_ioctl(sev_guest->sev_fd, KVM_SEV_RECEIVE_START, ++ &start, &fw_error); ++ if (ret < 0) { ++ error_report("Error RECEIVE_START ret=%d fw_error=%d '%s'", ++ ret, fw_error, fw_error_to_str(fw_error)); ++ goto err; ++ } ++ ++ object_property_set_int(OBJECT(sev), "handle", start.handle, &error_abort); ++ sev_set_guest_state(sev, SEV_STATE_RECEIVE_UPDATE); ++err: ++ g_free(session); ++ g_free(pdh_cert); ++ ++ return ret; ++} ++ ++static int sev_receive_update_data(QEMUFile *f, uint8_t *ptr) ++{ ++ int ret = 1, fw_error = 0; ++ gchar *hdr = NULL, *trans = NULL; ++ struct kvm_sev_receive_update_data update = {}; ++ ++ /* get packet header */ ++ update.hdr_len = qemu_get_be32(f); ++ if (!check_blob_length(update.hdr_len)) { ++ return 1; ++ } ++ ++ hdr = g_new(gchar, update.hdr_len); ++ qemu_get_buffer(f, (uint8_t *)hdr, update.hdr_len); ++ update.hdr_uaddr = (uintptr_t)hdr; ++ ++ /* get transport buffer */ ++ update.trans_len = qemu_get_be32(f); ++ if (!check_blob_length(update.trans_len)) { ++ goto err; ++ } ++ ++ trans = g_new(gchar, update.trans_len); ++ update.trans_uaddr = (uintptr_t)trans; ++ qemu_get_buffer(f, (uint8_t *)update.trans_uaddr, update.trans_len); ++ ++ 
update.guest_uaddr = (uintptr_t) ptr; ++ update.guest_len = update.trans_len; ++ ++ trace_kvm_sev_receive_update_data(trans, ptr, update.guest_len, ++ hdr, update.hdr_len); ++ ++ ret = sev_ioctl(sev_guest->sev_fd, KVM_SEV_RECEIVE_UPDATE_DATA, ++ &update, &fw_error); ++ if (ret) { ++ error_report("Error RECEIVE_UPDATE_DATA ret=%d fw_error=%d '%s'", ++ ret, fw_error, fw_error_to_str(fw_error)); ++ goto err; ++ } ++err: ++ g_free(trans); ++ g_free(hdr); ++ return ret; ++} ++ ++int sev_load_incoming_page(QEMUFile *f, uint8_t *ptr) ++{ ++ SevGuestState *s = sev_guest; ++ ++ /* ++ * If this is first buffer and SEV is not in recieiving state then ++ * use RECEIVE_START command to create a encryption context. ++ */ ++ if (!sev_check_state(s, SEV_STATE_RECEIVE_UPDATE) && ++ sev_receive_start(s, f)) { ++ return 1; ++ } ++ ++ return sev_receive_update_data(f, ptr); ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 90bce02f9..031752c9e 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -55,6 +55,7 @@ int sev_save_setup(const char *pdh, const char *plat_cert, + const char *amd_cert); + int sev_save_outgoing_page(QEMUFile *f, uint8_t *ptr, + uint32_t size, uint64_t *bytes_sent); ++int sev_load_incoming_page(QEMUFile *f, uint8_t *ptr); + int sev_inject_launch_secret(const char *hdr, const char *secret, + uint64_t gpa, Error **errp); + +diff --git a/target/i386/trace-events b/target/i386/trace-events +index c165e3737..e32b0319b 100644 +--- a/target/i386/trace-events ++++ b/target/i386/trace-events +@@ -14,6 +14,9 @@ kvm_sev_attestation_report(const char *mnonce, const char *data) "mnonce %s data + kvm_sev_send_start(uint64_t pdh, int l1, uint64_t plat, int l2, uint64_t amd, int l3) "pdh 0x%" PRIx64 " len %d plat 0x%" PRIx64 " len %d amd 0x%" PRIx64 " len %d" + kvm_sev_send_update_data(void *src, void *dst, 
int len) "guest %p trans %p len %d" + kvm_sev_send_finish(void) "" ++kvm_sev_receive_start(int policy, void *session, void *pdh) "policy 0x%x session %p pdh %p" ++kvm_sev_receive_update_data(void *src, void *dst, int len, void *hdr, int hdr_len) "guest %p trans %p len %d hdr %p hdr_len %d" ++kvm_sev_receive_finish(void) "" + + # csv.c + kvm_csv_launch_encrypt_data(uint64_t gpa, void *addr, uint64_t len) "gpa 0x%" PRIx64 "addr %p len 0x%" PRIu64 +-- +2.31.1 + diff --git a/0017-kvm-Add-support-for-SEV-shared-regions-list-and-KVM_.patch b/0017-kvm-Add-support-for-SEV-shared-regions-list-and-KVM_.patch new file mode 100644 index 0000000..6e58f15 --- /dev/null +++ b/0017-kvm-Add-support-for-SEV-shared-regions-list-and-KVM_.patch @@ -0,0 +1,293 @@ +From 59a4679eb9a8229bc47ead1d0159521f4b224a21 Mon Sep 17 00:00:00 2001 +From: Ashish Kalra +Date: Tue, 27 Jul 2021 15:05:49 +0000 +Subject: [PATCH 08/28] kvm: Add support for SEV shared regions list and + KVM_EXIT_HYPERCALL. + +cherry-picked from https://github.com/AMDESE/qemu/commit/fcbbd9b19ac. + +KVM_HC_MAP_GPA_RANGE hypercall is used by the SEV guest to notify a +change in the page encryption status to the hypervisor. The hypercall +should be invoked only when the encryption attribute is changed from +encrypted -> decrypted and vice versa. By default all guest pages are +considered encrypted. + +The hypercall exits to userspace with KVM_EXIT_HYPERCALL exit code, +currently this is used only by SEV guests for guest page encryptiion +status tracking. Add support to handle this exit and invoke SEV +shared regions list handlers. + +Add support for SEV guest shared regions and implementation of the +SEV shared regions list. 
+ +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + linux-headers/linux/kvm.h | 3 ++ + target/i386/kvm/kvm.c | 48 +++++++++++++++++ + target/i386/sev.c | 105 ++++++++++++++++++++++++++++++++++++++ + target/i386/sev.h | 3 ++ + 4 files changed, 159 insertions(+) + +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index e5b45c384..922086904 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -346,6 +346,7 @@ struct kvm_run { + } mmio; + /* KVM_EXIT_HYPERCALL */ + struct { ++#define KVM_HC_MAP_GPA_RANGE 12 + __u64 nr; + __u64 args[6]; + __u64 ret; +@@ -1176,6 +1177,8 @@ struct kvm_ppc_resize_hpt { + #define KVM_CAP_S390_ZPCI_OP 221 + #define KVM_CAP_S390_CPU_TOPOLOGY 222 + ++#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) ++ + #ifdef KVM_CAP_IRQ_ROUTING + + struct kvm_irq_routing_irqchip { +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index 002b69903..521d6718d 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -134,6 +134,7 @@ static int has_pit_state2; + static int has_sregs2; + static int has_exception_payload; + static int has_triple_fault_event; ++static int has_map_gpa_range; + + static bool has_msr_mcg_ext_ctl; + +@@ -2136,6 +2137,17 @@ int kvm_arch_init_vcpu(CPUState *cs) + c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10); + } + ++ if (sev_enabled()) { ++ c = cpuid_find_entry(&cpuid_data.cpuid, ++ KVM_CPUID_FEATURES | kvm_base, 0); ++ if (c) { ++ c->eax |= (1 << KVM_FEATURE_MIGRATION_CONTROL); ++ if (has_map_gpa_range) { ++ c->eax |= (1 << KVM_FEATURE_HC_MAP_GPA_RANGE); ++ } ++ } ++ } ++ + cpuid_data.cpuid.nent = cpuid_i; + + cpuid_data.cpuid.padding = 0; +@@ -2518,6 +2530,17 @@ int kvm_arch_init(MachineState *ms, KVMState *s) + } + } + ++ has_map_gpa_range = kvm_check_extension(s, KVM_CAP_EXIT_HYPERCALL); ++ if (has_map_gpa_range) { ++ ret = kvm_vm_enable_cap(s, KVM_CAP_EXIT_HYPERCALL, 0, ++ KVM_EXIT_HYPERCALL_VALID_MASK); ++ if (ret < 0) { ++ 
error_report("kvm: Failed to enable MAP_GPA_RANGE cap: %s", ++ strerror(-ret)); ++ return ret; ++ } ++ } ++ + ret = kvm_get_supported_msrs(s); + if (ret < 0) { + return ret; +@@ -4964,6 +4987,28 @@ static int kvm_handle_tpr_access(X86CPU *cpu) + return 1; + } + ++static int kvm_handle_exit_hypercall(X86CPU *cpu, struct kvm_run *run) ++{ ++ /* ++ * Currently this exit is only used by SEV guests for ++ * guest page encryption status tracking. ++ */ ++ if (run->hypercall.nr == KVM_HC_MAP_GPA_RANGE) { ++ unsigned long enc = run->hypercall.args[2]; ++ unsigned long gpa = run->hypercall.args[0]; ++ unsigned long npages = run->hypercall.args[1]; ++ unsigned long gfn_start = gpa >> TARGET_PAGE_BITS; ++ unsigned long gfn_end = gfn_start + npages; ++ ++ if (enc) { ++ sev_remove_shared_regions_list(gfn_start, gfn_end); ++ } else { ++ sev_add_shared_regions_list(gfn_start, gfn_end); ++ } ++ } ++ return 0; ++} ++ + int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) + { + static const uint8_t int3 = 0xcc; +@@ -5384,6 +5429,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) + assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER); + ret = kvm_handle_wrmsr(cpu, run); + break; ++ case KVM_EXIT_HYPERCALL: ++ ret = kvm_handle_exit_hypercall(cpu, run); ++ break; + default: + fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); + ret = -1; +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 181bbe869..62493578c 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -45,6 +45,10 @@ + #define TYPE_SEV_GUEST "sev-guest" + OBJECT_DECLARE_SIMPLE_TYPE(SevGuestState, SEV_GUEST) + ++struct shared_region { ++ unsigned long gfn_start, gfn_end; ++ QTAILQ_ENTRY(shared_region) list; ++}; + + extern struct sev_ops sev_ops; + +@@ -90,6 +94,8 @@ struct SevGuestState { + uint32_t reset_cs; + uint32_t reset_ip; + bool reset_data_valid; ++ ++ QTAILQ_HEAD(, shared_region) shared_regions_list; + }; + + #define DEFAULT_GUEST_POLICY 0x1 /* 
disable debug */ +@@ -1145,6 +1151,7 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + add_migration_state_change_notifier(&sev_migration_state_notify); + + cgs_class->memory_encryption_ops = &sev_memory_encryption_ops; ++ QTAILQ_INIT(&sev->shared_regions_list); + + cgs->ready = true; + +@@ -1679,6 +1686,104 @@ int sev_load_incoming_page(QEMUFile *f, uint8_t *ptr) + return sev_receive_update_data(f, ptr); + } + ++int sev_remove_shared_regions_list(unsigned long start, unsigned long end) ++{ ++ SevGuestState *s = sev_guest; ++ struct shared_region *pos; ++ ++ QTAILQ_FOREACH(pos, &s->shared_regions_list, list) { ++ unsigned long l, r; ++ unsigned long curr_gfn_end = pos->gfn_end; ++ ++ /* ++ * Find if any intersection exists ? ++ * left bound for intersecting segment ++ */ ++ l = MAX(start, pos->gfn_start); ++ /* right bound for intersecting segment */ ++ r = MIN(end, pos->gfn_end); ++ if (l <= r) { ++ if (pos->gfn_start == l && pos->gfn_end == r) { ++ QTAILQ_REMOVE(&s->shared_regions_list, pos, list); ++ } else if (l == pos->gfn_start) { ++ pos->gfn_start = r; ++ } else if (r == pos->gfn_end) { ++ pos->gfn_end = l; ++ } else { ++ /* Do a de-merge -- split linked list nodes */ ++ struct shared_region *shrd_region; ++ ++ pos->gfn_end = l; ++ shrd_region = g_malloc0(sizeof(*shrd_region)); ++ if (!shrd_region) { ++ return 0; ++ } ++ shrd_region->gfn_start = r; ++ shrd_region->gfn_end = curr_gfn_end; ++ QTAILQ_INSERT_AFTER(&s->shared_regions_list, pos, ++ shrd_region, list); ++ } ++ } ++ if (end <= curr_gfn_end) { ++ break; ++ } ++ } ++ return 0; ++} ++ ++int sev_add_shared_regions_list(unsigned long start, unsigned long end) ++{ ++ struct shared_region *shrd_region; ++ struct shared_region *pos; ++ SevGuestState *s = sev_guest; ++ ++ if (QTAILQ_EMPTY(&s->shared_regions_list)) { ++ shrd_region = g_malloc0(sizeof(*shrd_region)); ++ if (!shrd_region) { ++ return -1; ++ } ++ shrd_region->gfn_start = start; ++ shrd_region->gfn_end = end; ++ 
QTAILQ_INSERT_TAIL(&s->shared_regions_list, shrd_region, list); ++ return 0; ++ } ++ ++ /* ++ * shared regions list is a sorted list in ascending order ++ * of guest PA's and also merges consecutive range of guest PA's ++ */ ++ QTAILQ_FOREACH(pos, &s->shared_regions_list, list) { ++ /* handle duplicate overlapping regions */ ++ if (start >= pos->gfn_start && end <= pos->gfn_end) { ++ return 0; ++ } ++ if (pos->gfn_end < start) { ++ continue; ++ } ++ /* merge consecutive guest PA(s) -- forward merge */ ++ if (pos->gfn_start <= start && pos->gfn_end >= start) { ++ pos->gfn_end = end; ++ return 0; ++ } ++ break; ++ } ++ /* ++ * Add a new node ++ */ ++ shrd_region = g_malloc0(sizeof(*shrd_region)); ++ if (!shrd_region) { ++ return -1; ++ } ++ shrd_region->gfn_start = start; ++ shrd_region->gfn_end = end; ++ if (pos) { ++ QTAILQ_INSERT_BEFORE(pos, shrd_region, list); ++ } else { ++ QTAILQ_INSERT_TAIL(&s->shared_regions_list, shrd_region, list); ++ } ++ return 1; ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 031752c9e..c4e761471 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -61,6 +61,9 @@ int sev_inject_launch_secret(const char *hdr, const char *secret, + + int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size); + void sev_es_set_reset_vector(CPUState *cpu); ++int sev_remove_shared_regions_list(unsigned long gfn_start, ++ unsigned long gfn_end); ++int sev_add_shared_regions_list(unsigned long gfn_start, unsigned long gfn_end); + + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + +-- +2.31.1 + diff --git a/0018-migration-add-support-to-migrate-shared-regions-list.patch b/0018-migration-add-support-to-migrate-shared-regions-list.patch new file mode 100644 index 0000000..e061452 --- /dev/null +++ b/0018-migration-add-support-to-migrate-shared-regions-list.patch @@ 
-0,0 +1,103 @@ +From a34683265a5a32e7c401dd1347569c44718f9cab Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 16:31:36 +0000 +Subject: [PATCH 09/28] migration: add support to migrate shared regions list + +cherry-picked from https://github.com/AMDESE/qemu/commit/9236f522e48b6. + +When memory encryption is enabled, the hypervisor maintains a shared +regions list which is referred by hypervisor during migration to check +if page is private or shared. This list is built during the VM bootup and +must be migrated to the target host so that hypervisor on target host can +use it for future migration. + +Signed-off-by: Brijesh Singh +Co-developed-by: Ashish Kalra +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + target/i386/sev.c | 43 +++++++++++++++++++++++++++++++++++++++++++ + target/i386/sev.h | 2 ++ + 2 files changed, 45 insertions(+) + +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 62493578c..c6f43ba3b 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -178,10 +178,15 @@ static const char *const sev_fw_errlist[] = { + + #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ + ++#define SHARED_REGION_LIST_CONT 0x1 ++#define SHARED_REGION_LIST_END 0x2 ++ + static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .save_setup = sev_save_setup, + .save_outgoing_page = sev_save_outgoing_page, + .load_incoming_page = sev_load_incoming_page, ++ .save_outgoing_shared_regions_list = sev_save_outgoing_shared_regions_list, ++ .load_incoming_shared_regions_list = sev_load_incoming_shared_regions_list, + }; + + static int +@@ -1784,6 +1789,44 @@ int sev_add_shared_regions_list(unsigned long start, unsigned long end) + return 1; + } + ++int sev_save_outgoing_shared_regions_list(QEMUFile *f) ++{ ++ SevGuestState *s = sev_guest; ++ struct shared_region *pos; ++ ++ QTAILQ_FOREACH(pos, &s->shared_regions_list, list) { ++ qemu_put_be32(f, SHARED_REGION_LIST_CONT); ++ qemu_put_be32(f, pos->gfn_start); ++ 
qemu_put_be32(f, pos->gfn_end); ++ } ++ ++ qemu_put_be32(f, SHARED_REGION_LIST_END); ++ return 0; ++} ++ ++int sev_load_incoming_shared_regions_list(QEMUFile *f) ++{ ++ SevGuestState *s = sev_guest; ++ struct shared_region *shrd_region; ++ int status; ++ ++ status = qemu_get_be32(f); ++ while (status == SHARED_REGION_LIST_CONT) { ++ ++ shrd_region = g_malloc0(sizeof(*shrd_region)); ++ if (!shrd_region) { ++ return 0; ++ } ++ shrd_region->gfn_start = qemu_get_be32(f); ++ shrd_region->gfn_end = qemu_get_be32(f); ++ ++ QTAILQ_INSERT_TAIL(&s->shared_regions_list, shrd_region, list); ++ ++ status = qemu_get_be32(f); ++ } ++ return 0; ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index c4e761471..89fe27285 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -64,6 +64,8 @@ void sev_es_set_reset_vector(CPUState *cpu); + int sev_remove_shared_regions_list(unsigned long gfn_start, + unsigned long gfn_end); + int sev_add_shared_regions_list(unsigned long gfn_start, unsigned long gfn_end); ++int sev_save_outgoing_shared_regions_list(QEMUFile *f); ++int sev_load_incoming_shared_regions_list(QEMUFile *f); + + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + +-- +2.31.1 + diff --git a/0019-migration-ram-add-support-to-send-encrypted-pages.patch b/0019-migration-ram-add-support-to-send-encrypted-pages.patch new file mode 100644 index 0000000..c306f80 --- /dev/null +++ b/0019-migration-ram-add-support-to-send-encrypted-pages.patch @@ -0,0 +1,338 @@ +From 91c9ccc2deb1de8ea8822b710654c5c69e290f54 Mon Sep 17 00:00:00 2001 +From: Brijesh Singh +Date: Tue, 27 Jul 2021 16:53:19 +0000 +Subject: [PATCH 10/28] migration/ram: add support to send encrypted pages + +cherry-picked from https://github.com/AMDESE/qemu/commit/2d6bda0d4cf. 
+ +When memory encryption is enabled, the guest memory will be encrypted with +the guest specific key. The patch introduces RAM_SAVE_FLAG_ENCRYPTED_PAGE +flag to distinguish the encrypted data from plaintext. Encrypted pages +may need special handling. The sev_save_outgoing_page() is used +by the sender to write the encrypted pages onto the socket, similarly the +sev_load_incoming_page() is used by the target to read the +encrypted pages from the socket and load into the guest memory. + +Signed-off-by: Brijesh Singh +Co-developed-by: Ashish Kalra +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + migration/migration.h | 2 + + migration/ram.c | 163 +++++++++++++++++++++++++++++++++++++++++- + target/i386/sev.c | 14 ++++ + target/i386/sev.h | 4 ++ + 4 files changed, 182 insertions(+), 1 deletion(-) + +diff --git a/migration/migration.h b/migration/migration.h +index cdad8acea..9011d3c7f 100644 +--- a/migration/migration.h ++++ b/migration/migration.h +@@ -480,4 +480,6 @@ void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page); + bool migrate_multi_channels_is_allowed(void); + void migrate_protocol_allow_multi_channels(bool allow); + ++bool memcrypt_enabled(void); ++ + #endif +diff --git a/migration/ram.c b/migration/ram.c +index f25ebd962..3eaabe60d 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -57,6 +57,10 @@ + #include "qemu/iov.h" + #include "multifd.h" + #include "sysemu/runstate.h" ++#include "exec/confidential-guest-support.h" ++ ++/* Defines RAM_SAVE_ENCRYPTED_PAGE and RAM_SAVE_SHARED_REGION_LIST */ ++#include "target/i386/sev.h" + + #include "hw/boards.h" /* for machine_dump_guest_core() */ + +@@ -82,6 +86,16 @@ + #define RAM_SAVE_FLAG_XBZRLE 0x40 + /* 0x80 is reserved in migration.h start with 0x100 next */ + #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100 ++#define RAM_SAVE_FLAG_ENCRYPTED_DATA 0x200 ++ ++bool memcrypt_enabled(void) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ if(ms->cgs) ++ return ms->cgs->ready; ++ else ++ 
return false; ++} + + XBZRLECacheStats xbzrle_counters; + +@@ -519,6 +533,8 @@ static QemuCond decomp_done_cond; + + static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block, + ram_addr_t offset, uint8_t *source_buf); ++static int ram_save_encrypted_page(RAMState *rs, PageSearchStatus *pss, ++ bool last_stage); + + static void postcopy_preempt_restore(RAMState *rs, PageSearchStatus *pss, + bool postcopy_requested); +@@ -1356,6 +1372,80 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, + return 1; + } + ++/** ++ * ram_save_encrypted_page - send the given encrypted page to the stream ++ */ ++static int ram_save_encrypted_page(RAMState *rs, PageSearchStatus *pss, ++ bool last_stage) ++{ ++ int ret; ++ uint8_t *p; ++ RAMBlock *block = pss->block; ++ ram_addr_t offset = pss->page << TARGET_PAGE_BITS; ++ uint64_t bytes_xmit; ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *) object_get_class(OBJECT(ms->cgs)); ++ struct ConfidentialGuestMemoryEncryptionOps *ops = ++ cgs_class->memory_encryption_ops; ++ ++ p = block->host + offset; ++ ++ ram_counters.transferred += ++ save_page_header(rs, rs->f, block, ++ offset | RAM_SAVE_FLAG_ENCRYPTED_DATA); ++ ++ qemu_put_be32(rs->f, RAM_SAVE_ENCRYPTED_PAGE); ++ ret = ops->save_outgoing_page(rs->f, p, TARGET_PAGE_SIZE, &bytes_xmit); ++ if (ret) { ++ return -1; ++ } ++ ++ ram_counters.transferred += bytes_xmit; ++ ram_counters.normal++; ++ ++ return 1; ++} ++ ++/** ++ * ram_save_shared_region_list: send the shared region list ++ */ ++static int ram_save_shared_region_list(RAMState *rs, QEMUFile *f) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *) object_get_class(OBJECT(ms->cgs)); ++ struct ConfidentialGuestMemoryEncryptionOps *ops = ++ cgs_class->memory_encryption_ops; ++ ++ save_page_header(rs, rs->f, 
rs->last_seen_block, ++ RAM_SAVE_FLAG_ENCRYPTED_DATA); ++ qemu_put_be32(rs->f, RAM_SAVE_SHARED_REGIONS_LIST); ++ return ops->save_outgoing_shared_regions_list(rs->f); ++} ++ ++static int load_encrypted_data(QEMUFile *f, uint8_t *ptr) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *) object_get_class(OBJECT(ms->cgs)); ++ struct ConfidentialGuestMemoryEncryptionOps *ops = ++ cgs_class->memory_encryption_ops; ++ ++ int flag; ++ ++ flag = qemu_get_be32(f); ++ ++ if (flag == RAM_SAVE_ENCRYPTED_PAGE) { ++ return ops->load_incoming_page(f, ptr); ++ } else if (flag == RAM_SAVE_SHARED_REGIONS_LIST) { ++ return ops->load_incoming_shared_regions_list(f); ++ } else { ++ error_report("unknown encrypted flag %x", flag); ++ return 1; ++ } ++} ++ + /** + * ram_save_page: send the given page to the stream + * +@@ -2272,6 +2362,35 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset) + return false; + } + ++/** ++ * encrypted_test_list: check if the page is encrypted ++ * ++ * Returns a bool indicating whether the page is encrypted. ++ */ ++static bool encrypted_test_list(RAMState *rs, RAMBlock *block, ++ unsigned long page) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *) object_get_class(OBJECT(ms->cgs)); ++ struct ConfidentialGuestMemoryEncryptionOps *ops = ++ cgs_class->memory_encryption_ops; ++ unsigned long gfn; ++ ++ /* ROM devices contains the unencrypted data */ ++ if (memory_region_is_rom(block->mr)) { ++ return false; ++ } ++ ++ /* ++ * Translate page in ram_addr_t address space to GPA address ++ * space using memory region. 
++ */ ++ gfn = page + (block->mr->addr >> TARGET_PAGE_BITS); ++ ++ return ops->is_gfn_in_unshared_region(gfn); ++} ++ + /** + * ram_save_target_page: save one target page + * +@@ -2290,6 +2409,17 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss) + return res; + } + ++ /* ++ * If memory encryption is enabled then use memory encryption APIs ++ * to write the outgoing buffer to the wire. The encryption APIs ++ * will take care of accessing the guest memory and re-encrypt it ++ * for the transport purposes. ++ */ ++ if (memcrypt_enabled() && ++ encrypted_test_list(rs, pss->block, pss->page)) { ++ return ram_save_encrypted_page(rs, pss, rs->last_stage); ++ } ++ + if (save_compress_page(rs, block, offset)) { + return 1; + } +@@ -3194,6 +3324,18 @@ void qemu_guest_free_page_hint(void *addr, size_t len) + } + } + ++static int ram_encrypted_save_setup(void) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *) object_get_class(OBJECT(ms->cgs)); ++ struct ConfidentialGuestMemoryEncryptionOps *ops = ++ cgs_class->memory_encryption_ops; ++ MigrationParameters *p = &migrate_get_current()->parameters; ++ ++ return ops->save_setup(p->sev_pdh, p->sev_plat_cert, p->sev_amd_cert); ++} ++ + /* + * Each of ram_save_setup, ram_save_iterate and ram_save_complete has + * long-running RCU critical section. 
When rcu-reclaims in the code +@@ -3229,6 +3371,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque) + (*rsp)->f = f; + + WITH_RCU_READ_LOCK_GUARD() { ++ ++ if (memcrypt_enabled()) { ++ if (ram_encrypted_save_setup()) { ++ return -1; ++ } ++ } ++ + qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE); + + RAMBLOCK_FOREACH_MIGRATABLE(block) { +@@ -3425,6 +3574,11 @@ static int ram_save_complete(QEMUFile *f, void *opaque) + + flush_compressed_data(rs); + ram_control_after_iterate(f, RAM_CONTROL_FINISH); ++ ++ /* send the shared regions list */ ++ if (memcrypt_enabled()) { ++ ret = ram_save_shared_region_list(rs, f); ++ } + } + + if (ret < 0) { +@@ -4254,7 +4408,8 @@ static int ram_load_precopy(QEMUFile *f) + } + + if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE | +- RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) { ++ RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE | ++ RAM_SAVE_FLAG_ENCRYPTED_DATA)) { + RAMBlock *block = ram_block_from_stream(mis, f, flags, + RAM_CHANNEL_PRECOPY); + +@@ -4384,6 +4539,12 @@ static int ram_load_precopy(QEMUFile *f) + break; + } + break; ++ case RAM_SAVE_FLAG_ENCRYPTED_DATA: ++ if (load_encrypted_data(f, host)) { ++ error_report("Failed to load encrypted data"); ++ ret = -EINVAL; ++ } ++ break; + case RAM_SAVE_FLAG_EOS: + /* normal exit */ + multifd_recv_sync_main(); +diff --git a/target/i386/sev.c b/target/i386/sev.c +index c6f43ba3b..01826fabb 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -185,6 +185,7 @@ static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .save_setup = sev_save_setup, + .save_outgoing_page = sev_save_outgoing_page, + .load_incoming_page = sev_load_incoming_page, ++ .is_gfn_in_unshared_region = sev_is_gfn_in_unshared_region, + .save_outgoing_shared_regions_list = sev_save_outgoing_shared_regions_list, + .load_incoming_shared_regions_list = sev_load_incoming_shared_regions_list, + }; +@@ -1827,6 +1828,19 @@ int 
sev_load_incoming_shared_regions_list(QEMUFile *f) + return 0; + } + ++bool sev_is_gfn_in_unshared_region(unsigned long gfn) ++{ ++ SevGuestState *s = sev_guest; ++ struct shared_region *pos; ++ ++ QTAILQ_FOREACH(pos, &s->shared_regions_list, list) { ++ if (gfn >= pos->gfn_start && gfn < pos->gfn_end) { ++ return false; ++ } ++ } ++ return true; ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 89fe27285..4c379e491 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -38,6 +38,9 @@ typedef struct SevKernelLoaderContext { + size_t cmdline_size; + } SevKernelLoaderContext; + ++#define RAM_SAVE_ENCRYPTED_PAGE 0x1 ++#define RAM_SAVE_SHARED_REGIONS_LIST 0x2 ++ + #ifdef CONFIG_SEV + bool sev_enabled(void); + bool sev_es_enabled(void); +@@ -66,6 +69,7 @@ int sev_remove_shared_regions_list(unsigned long gfn_start, + int sev_add_shared_regions_list(unsigned long gfn_start, unsigned long gfn_end); + int sev_save_outgoing_shared_regions_list(QEMUFile *f); + int sev_load_incoming_shared_regions_list(QEMUFile *f); ++bool sev_is_gfn_in_unshared_region(unsigned long gfn); + + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + +-- +2.31.1 + diff --git a/0020-migration-ram-Force-encrypted-status-for-flash0-flas.patch b/0020-migration-ram-Force-encrypted-status-for-flash0-flas.patch new file mode 100644 index 0000000..4927bf5 --- /dev/null +++ b/0020-migration-ram-Force-encrypted-status-for-flash0-flas.patch @@ -0,0 +1,44 @@ +From 2107ec54dcb62bee15e2f068237ef94fb6e1e7e8 Mon Sep 17 00:00:00 2001 +From: Ashish Kalra +Date: Tue, 27 Jul 2021 18:05:25 +0000 +Subject: [PATCH 11/28] migration/ram: Force encrypted status for flash0 & + flash1 devices. + +cherry-picked from https://github.com/AMDESE/qemu/commit/803d6a4c8d. 
+ +Currently OVMF clears the C-bit and marks NonExistent memory space +as decrypted in the page encryption bitmap. By marking the +NonExistent memory space as decrypted it gurantees any future MMIO adds +will work correctly, but this marks flash0 device space as decrypted. +At reset the SEV core will be in forced encrypted state, so this +decrypted marking of flash0 device space will cause VCPU reset to fail +as flash0 device pages will be migrated incorrectly. + +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + migration/ram.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/migration/ram.c b/migration/ram.c +index 3eaabe60d..b289b8a0c 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -2382,6 +2382,14 @@ static bool encrypted_test_list(RAMState *rs, RAMBlock *block, + return false; + } + ++ if (!strcmp(memory_region_name(block->mr), "system.flash0")) { ++ return true; ++ } ++ ++ if (!strcmp(memory_region_name(block->mr), "system.flash1")) { ++ return false; ++ } ++ + /* + * Translate page in ram_addr_t address space to GPA address + * space using memory region. +-- +2.31.1 + diff --git a/0021-migration-for-SEV-live-migration-bump-downtime-limit.patch b/0021-migration-for-SEV-live-migration-bump-downtime-limit.patch new file mode 100644 index 0000000..9451a54 --- /dev/null +++ b/0021-migration-for-SEV-live-migration-bump-downtime-limit.patch @@ -0,0 +1,64 @@ +From e08909880fc9565aa23dff84f0b9437d66cc39b3 Mon Sep 17 00:00:00 2001 +From: Ashish Kalra +Date: Tue, 3 Aug 2021 16:06:27 +0000 +Subject: [PATCH 12/28] migration: for SEV live migration bump downtime limit + to 1s. + +cherry-picked from https://github.com/AMDESE/qemu/commit/b1468803a2200. + +Now, qemu has a default expected downtime of 300 ms and +SEV Live migration has a page-per-second bandwidth of 350-450 pages +( SEV Live migration being generally slow due to guest RAM pages +being migrated after encryption using the security processor ). 
+With this expected downtime of 300ms and 350-450 pps bandwith, +the threshold size = <1/3 of the PPS bandwidth = ~100 pages. + +Now, this threshold size is the maximum pages/bytes that can be +sent in the final completion phase of Live migration +(where the source VM is stopped) with the expected downtime. +Therefore, with the threshold size computed above, +the migration completion phase which halts the source VM +and then transfers the leftover dirty pages, +is only reached in SEV live migration case when # of dirty pages are ~100. + +The dirty-pages-rate with larger guest RAM configuration like 4G, 8G, etc. +is much higher, typically in the range of 300-400+ pages, hence, +we always remain in the "dirty-sync" phase of migration and never +reach the migration completion phase with above guest RAM configs. + +To summarize, with larger guest RAM configs, +the dirty-pages-rate > threshold_size (with the default qemu expected downtime of 300ms). + +So, the fix is to increase qemu's expected downtime. + +This is a tweakable parameter which can be set using "migrate_set_downtime". + +With a downtime of 1 second, we get a threshold size of ~350-450 pages, +which will handle the "dirty-pages-rate" of 300+ pages and complete +the migration process, so we bump the default downtime to 1s in case +of SEV live migration being active. 
+ +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + migration/migration.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/migration/migration.c b/migration/migration.c +index 0e8bb4837..d900dea74 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -3772,6 +3772,10 @@ static void migration_update_counters(MigrationState *s, + transferred = current_bytes - s->iteration_initial_bytes; + time_spent = current_time - s->iteration_start_time; + bandwidth = (double)transferred / time_spent; ++ if (memcrypt_enabled() && ++ s->parameters.downtime_limit < 1000) { ++ s->parameters.downtime_limit = 1000; ++ } + s->threshold_size = bandwidth * s->parameters.downtime_limit; + + s->mbps = (((double) transferred * 8.0) / +-- +2.31.1 + diff --git a/0022-kvm-Add-support-for-userspace-MSR-filtering-and-hand.patch b/0022-kvm-Add-support-for-userspace-MSR-filtering-and-hand.patch new file mode 100644 index 0000000..1d118d5 --- /dev/null +++ b/0022-kvm-Add-support-for-userspace-MSR-filtering-and-hand.patch @@ -0,0 +1,109 @@ +From 08e63d6e283e9d86be1e24ddea23d5e205e36e72 Mon Sep 17 00:00:00 2001 +From: Ashish Kalra +Date: Tue, 27 Jul 2021 17:59:33 +0000 +Subject: [PATCH 13/28] kvm: Add support for userspace MSR filtering and + handling of MSR_KVM_MIGRATION_CONTROL. + +cherry-picked from https://github.com/AMDESE/qemu/commit/67935c3fd5f. + +Add support for userspace MSR filtering using KVM_X86_SET_MSR_FILTER +ioctl and handling of MSRs in userspace. Currently this is only used +for SEV guests which use MSR_KVM_MIGRATION_CONTROL to indicate if the +guest is enabled and ready for migration. + +KVM arch code calls into SEV guest specific code to delete the +SEV migrate blocker which has been setup at SEV_LAUNCH_FINISH. 
+ +Signed-off-by: Ashish Kalra +Signed-off-by: hanliyang +--- + target/i386/kvm/kvm.c | 35 +++++++++++++++++++++++++++++++++++ + target/i386/sev.c | 6 ++++++ + target/i386/sev.h | 1 + + 3 files changed, 42 insertions(+) + +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index 521d6718d..f397b5e42 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -2433,6 +2433,32 @@ static bool kvm_rdmsr_core_thread_count(X86CPU *cpu, uint32_t msr, + return true; + } + ++/* ++ * Currently this exit is only used by SEV guests for ++ * MSR_KVM_MIGRATION_CONTROL to indicate if the guest ++ * is ready for migration. ++ */ ++static uint64_t msr_kvm_migration_control; ++ ++static bool kvm_rdmsr_kvm_migration_control(X86CPU *cpu, uint32_t msr, ++ uint64_t *val) ++{ ++ *val = msr_kvm_migration_control; ++ ++ return true; ++} ++ ++static bool kvm_wrmsr_kvm_migration_control(X86CPU *cpu, uint32_t msr, ++ uint64_t val) ++{ ++ msr_kvm_migration_control = val; ++ ++ if (val == KVM_MIGRATION_READY) ++ sev_del_migrate_blocker(); ++ ++ return true; ++} ++ + static Notifier smram_machine_done; + static KVMMemoryListener smram_listener; + static AddressSpace smram_address_space; +@@ -2678,6 +2704,15 @@ int kvm_arch_init(MachineState *ms, KVMState *s) + strerror(-ret)); + exit(1); + } ++ ++ r = kvm_filter_msr(s, MSR_KVM_MIGRATION_CONTROL, ++ kvm_rdmsr_kvm_migration_control, ++ kvm_wrmsr_kvm_migration_control); ++ if (!r) { ++ error_report("Could not install MSR_KVM_MIGRATION_CONTROL handler: %s", ++ strerror(-ret)); ++ exit(1); ++ } + } + + return 0; +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 01826fabb..6bbb8ecb8 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -930,6 +930,12 @@ sev_launch_finish(SevGuestState *sev) + migrate_add_blocker(sev_mig_blocker, &error_fatal); + } + ++void ++sev_del_migrate_blocker(void) ++{ ++ migrate_del_blocker(sev_mig_blocker); ++} ++ + static int + sev_receive_finish(SevGuestState *s) + { +diff --git 
a/target/i386/sev.h b/target/i386/sev.h +index 4c379e491..a76746a9c 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -70,6 +70,7 @@ int sev_add_shared_regions_list(unsigned long gfn_start, unsigned long gfn_end); + int sev_save_outgoing_shared_regions_list(QEMUFile *f); + int sev_load_incoming_shared_regions_list(QEMUFile *f); + bool sev_is_gfn_in_unshared_region(unsigned long gfn); ++void sev_del_migrate_blocker(void); + + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + +-- +2.31.1 + diff --git a/0023-anolis-migration-ram-Force-encrypted-status-for-VGA-.patch b/0023-anolis-migration-ram-Force-encrypted-status-for-VGA-.patch new file mode 100644 index 0000000..cbbae98 --- /dev/null +++ b/0023-anolis-migration-ram-Force-encrypted-status-for-VGA-.patch @@ -0,0 +1,33 @@ +From 04cec7f9571ae6e820008996748c814972276b02 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Tue, 8 Dec 2020 22:57:46 -0500 +Subject: [PATCH 14/28] anolis: migration/ram: Force encrypted status for VGA + vram + +The VGA vram memory region act as frame buffer of VM. This memory +is decrypted in the QEMU process. For CSV VM live migration, we +should avoid memory encryption status check on VGA vram. + +Signed-off-by: hanliyang +--- + migration/ram.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/migration/ram.c b/migration/ram.c +index b289b8a0c..28e05119f 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -2390,6 +2390,10 @@ static bool encrypted_test_list(RAMState *rs, RAMBlock *block, + return false; + } + ++ if (!strcmp(memory_region_name(block->mr), "vga.vram")) { ++ return false; ++ } ++ + /* + * Translate page in ram_addr_t address space to GPA address + * space using memory region. 
+-- +2.31.1 + diff --git a/0024-anolis-target-i386-sev-Clear-shared_regions_list-whe.patch b/0024-anolis-target-i386-sev-Clear-shared_regions_list-whe.patch new file mode 100644 index 0000000..e18f4dd --- /dev/null +++ b/0024-anolis-target-i386-sev-Clear-shared_regions_list-whe.patch @@ -0,0 +1,57 @@ +From eb03b9cd086048f4354e73fcfd7aefe7e5142ab6 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Sun, 16 Jan 2022 19:57:58 -0500 +Subject: [PATCH 15/28] anolis: target/i386: sev: Clear shared_regions_list + when reboot CSV Guest + +Also fix memory leak in sev_remove_shared_regions_list(). + +Signed-off-by: hanliyang +--- + target/i386/kvm/kvm.c | 5 +++++ + target/i386/sev.c | 5 +++-- + 2 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index f397b5e42..31667fdde 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -2215,6 +2215,11 @@ void kvm_arch_reset_vcpu(X86CPU *cpu) + env->mp_state = KVM_MP_STATE_RUNNABLE; + } + ++ if (cpu_is_bsp(cpu) && ++ sev_enabled() && has_map_gpa_range) { ++ sev_remove_shared_regions_list(0, -1); ++ } ++ + /* enabled by default */ + env->poll_control_msr = 1; + +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 6bbb8ecb8..424c404bd 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -1701,9 +1701,9 @@ int sev_load_incoming_page(QEMUFile *f, uint8_t *ptr) + int sev_remove_shared_regions_list(unsigned long start, unsigned long end) + { + SevGuestState *s = sev_guest; +- struct shared_region *pos; ++ struct shared_region *pos, *next_pos; + +- QTAILQ_FOREACH(pos, &s->shared_regions_list, list) { ++ QTAILQ_FOREACH_SAFE(pos, &s->shared_regions_list, list, next_pos) { + unsigned long l, r; + unsigned long curr_gfn_end = pos->gfn_end; + +@@ -1717,6 +1717,7 @@ int sev_remove_shared_regions_list(unsigned long start, unsigned long end) + if (l <= r) { + if (pos->gfn_start == l && pos->gfn_end == r) { + QTAILQ_REMOVE(&s->shared_regions_list, pos, list); ++ 
g_free(pos); + } else if (l == pos->gfn_start) { + pos->gfn_start = r; + } else if (r == pos->gfn_end) { +-- +2.31.1 + diff --git a/0025-anolis-migration-ram-Fix-calculation-of-gfn-correpon.patch b/0025-anolis-migration-ram-Fix-calculation-of-gfn-correpon.patch new file mode 100644 index 0000000..81d7c6c --- /dev/null +++ b/0025-anolis-migration-ram-Fix-calculation-of-gfn-correpon.patch @@ -0,0 +1,57 @@ +From 1a55b5a3b63bc9199da6e8c68f3bff64cc4f238f Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Sun, 16 Jan 2022 20:05:02 -0500 +Subject: [PATCH 16/28] anolis: migration/ram: Fix calculation of gfn correpond + to a page in ramblock + +A RAMBlock contains a host memory region which may consist of many +discontiguous MemoryRegion in AddressSpace of a Guest, so we cannot +get gpa by MemoryRegion.addr. Since KVM memslot records the relationship +between gpa and hva, so we can pass the hva of page in RAMBlock to +kvm_phisical_memory_addr_from_host() to get the expected gpa. + +Signed-off-by: hanliyang +--- + migration/ram.c | 12 +++++++++++- + 1 file changed, 11 insertions(+), 1 deletion(-) + +diff --git a/migration/ram.c b/migration/ram.c +index 28e05119f..4914474b2 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -61,6 +61,7 @@ + + /* Defines RAM_SAVE_ENCRYPTED_PAGE and RAM_SAVE_SHARED_REGION_LIST */ + #include "target/i386/sev.h" ++#include "sysemu/kvm.h" + + #include "hw/boards.h" /* for machine_dump_guest_core() */ + +@@ -2376,6 +2377,8 @@ static bool encrypted_test_list(RAMState *rs, RAMBlock *block, + struct ConfidentialGuestMemoryEncryptionOps *ops = + cgs_class->memory_encryption_ops; + unsigned long gfn; ++ hwaddr paddr = 0; ++ int ret; + + /* ROM devices contains the unencrypted data */ + if (memory_region_is_rom(block->mr)) { +@@ -2398,7 +2401,14 @@ static bool encrypted_test_list(RAMState *rs, RAMBlock *block, + * Translate page in ram_addr_t address space to GPA address + * space using memory region. 
+ */ +- gfn = page + (block->mr->addr >> TARGET_PAGE_BITS); ++ if (kvm_enabled()) { ++ ret = kvm_physical_memory_addr_from_host(kvm_state, ++ block->host + (page << TARGET_PAGE_BITS), &paddr); ++ if (ret == 0) { ++ return false; ++ } ++ } ++ gfn = paddr >> TARGET_PAGE_BITS; + + return ops->is_gfn_in_unshared_region(gfn); + } +-- +2.31.1 + diff --git a/0026-anolis-target-i386-csv-Move-is_hygon_cpu-to-header-f.patch b/0026-anolis-target-i386-csv-Move-is_hygon_cpu-to-header-f.patch new file mode 100644 index 0000000..b87fc43 --- /dev/null +++ b/0026-anolis-target-i386-csv-Move-is_hygon_cpu-to-header-f.patch @@ -0,0 +1,85 @@ +From 9fe347f0dc354b49c4102bcfc9e44652db465d89 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Wed, 1 Nov 2023 06:00:11 +0000 +Subject: [PATCH 17/28] anolis: target/i386: csv: Move is_hygon_cpu() to header + file csv.h + +The helper function is_hygon_cpu() will be called by functions out of +target/i386/csv.c. Move it to header file csv.h so that it can be a +common helper function. 
+ +Signed-off-by: hanliyang +--- + target/i386/csv.c | 20 -------------------- + target/i386/csv.h | 22 ++++++++++++++++++++++ + 2 files changed, 22 insertions(+), 20 deletions(-) + +diff --git a/target/i386/csv.c b/target/i386/csv.c +index d166d3775..161cad39a 100644 +--- a/target/i386/csv.c ++++ b/target/i386/csv.c +@@ -27,28 +27,8 @@ + + CsvGuestState csv_guest = { 0 }; + +-#define CPUID_VENDOR_HYGON_EBX 0x6f677948 /* "Hygo" */ +-#define CPUID_VENDOR_HYGON_ECX 0x656e6975 /* "uine" */ +-#define CPUID_VENDOR_HYGON_EDX 0x6e65476e /* "nGen" */ +- + #define GUEST_POLICY_CSV_BIT (1 << 6) + +-static bool is_hygon_cpu(void) +-{ +- uint32_t ebx = 0; +- uint32_t ecx = 0; +- uint32_t edx = 0; +- +- host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); +- +- if (ebx == CPUID_VENDOR_HYGON_EBX && +- ecx == CPUID_VENDOR_HYGON_ECX && +- edx == CPUID_VENDOR_HYGON_EDX) +- return true; +- else +- return false; +-} +- + int + csv_init(uint32_t policy, int fd, void *state, struct sev_ops *ops) + { +diff --git a/target/i386/csv.h b/target/i386/csv.h +index 640eafd45..c2bee16e9 100644 +--- a/target/i386/csv.h ++++ b/target/i386/csv.h +@@ -16,6 +16,28 @@ + + #include "qapi/qapi-commands-misc-target.h" + ++#include "cpu.h" ++ ++#define CPUID_VENDOR_HYGON_EBX 0x6f677948 /* "Hygo" */ ++#define CPUID_VENDOR_HYGON_ECX 0x656e6975 /* "uine" */ ++#define CPUID_VENDOR_HYGON_EDX 0x6e65476e /* "nGen" */ ++ ++static __attribute__((unused)) bool is_hygon_cpu(void) ++{ ++ uint32_t ebx = 0; ++ uint32_t ecx = 0; ++ uint32_t edx = 0; ++ ++ host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); ++ ++ if (ebx == CPUID_VENDOR_HYGON_EBX && ++ ecx == CPUID_VENDOR_HYGON_ECX && ++ edx == CPUID_VENDOR_HYGON_EDX) ++ return true; ++ else ++ return false; ++} ++ + #ifdef CONFIG_CSV + bool csv_enabled(void); + #else +-- +2.31.1 + diff --git a/0027-anolis-target-i386-csv-Read-cert-chain-from-file-whe.patch b/0027-anolis-target-i386-csv-Read-cert-chain-from-file-whe.patch new file mode 100644 index 0000000..b4d3663 --- /dev/null +++ 
b/0027-anolis-target-i386-csv-Read-cert-chain-from-file-whe.patch @@ -0,0 +1,131 @@ +From c71371d67be9c756d4c4b706a18766c29bdfba18 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Mon, 13 Nov 2023 21:55:33 +0000 +Subject: [PATCH 18/28] anolis: target/i386: csv: Read cert chain from file + when prepared for CSV live migration + +The cert chain is too long when encoded with base64, use the filename +of cert chain instead of the encoded string when prepared for CSV live +migration. + +Signed-off-by: hanliyang +--- + qapi/migration.json | 24 +++++++++++++++--------- + target/i386/sev.c | 29 +++++++++++++++++++++++++---- + 2 files changed, 40 insertions(+), 13 deletions(-) + +diff --git a/qapi/migration.json b/qapi/migration.json +index 4300a578f..e189a02e3 100644 +--- a/qapi/migration.json ++++ b/qapi/migration.json +@@ -776,14 +776,16 @@ + # block device name if there is one, and to their node name + # otherwise. (Since 5.2) + # +-# @sev-pdh: The target host platform diffie-hellman key encoded in base64 ++# @sev-pdh: The target host platform diffie-hellman key encoded in base64, or ++# pdh filename for hygon + # (Since 4.2) + # +-# @sev-plat-cert: The target host platform certificate chain encoded in base64 ++# @sev-plat-cert: The target host platform certificate chain encoded in base64, ++# or plat cert filename for hygon + # (Since 4.2) + # + # @sev-amd-cert: AMD certificate chain which include ASK and OCA encoded in +-# base64 (Since 4.2) ++# base64, or vendor cert filename for hygon (Since 4.2) + # + # Features: + # @unstable: Member @x-checkpoint-delay is experimental. +@@ -951,14 +953,16 @@ + # block device name if there is one, and to their node name + # otherwise. 
(Since 5.2) + # +-# @sev-pdh: The target host platform diffie-hellman key encoded in base64 ++# @sev-pdh: The target host platform diffie-hellman key encoded in base64, or ++# pdh filename for hygon + # (Since 4.2) + # +-# @sev-plat-cert: The target host platform certificate chain encoded in base64 ++# @sev-plat-cert: The target host platform certificate chain encoded in base64, ++# or plat cert filename for hygon + # (Since 4.2) + # + # @sev-amd-cert: AMD certificate chain which include ASK and OCA encoded in +-# base64 (Since 4.2) ++# base64, or vendor cert filename for hygon (Since 4.2) + # + # Features: + # @unstable: Member @x-checkpoint-delay is experimental. +@@ -1163,14 +1167,16 @@ + # block device name if there is one, and to their node name + # otherwise. (Since 5.2) + # +-# @sev-pdh: The target host platform diffie-hellman key encoded in base64 ++# @sev-pdh: The target host platform diffie-hellman key encoded in base64, or ++# pdh filename for hygon + # (Since 4.2) + # +-# @sev-plat-cert: The target host platform certificate chain encoded in base64 ++# @sev-plat-cert: The target host platform certificate chain encoded in base64, ++# or plat cert filename for hygon + # (Since 4.2) + # + # @sev-amd-cert: AMD certificate chain which include ASK and OCA encoded in +-# base64 (Since 4.2) ++# base64, or vendor cert filename for hygon (Since 4.2) + # + # Features: + # @unstable: Member @x-checkpoint-delay is experimental. 
+diff --git a/target/i386/sev.c b/target/i386/sev.c +index 424c404bd..1dcf17fb2 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -984,18 +984,39 @@ int sev_save_setup(const char *pdh, const char *plat_cert, + { + SevGuestState *s = sev_guest; + +- s->remote_pdh = g_base64_decode(pdh, &s->remote_pdh_len); ++ if (is_hygon_cpu()) { ++ if (sev_read_file_base64(pdh, &s->remote_pdh, ++ &s->remote_pdh_len) < 0) { ++ goto error; ++ } ++ } else { ++ s->remote_pdh = g_base64_decode(pdh, &s->remote_pdh_len); ++ } + if (!check_blob_length(s->remote_pdh_len)) { + goto error; + } + +- s->remote_plat_cert = g_base64_decode(plat_cert, +- &s->remote_plat_cert_len); ++ if (is_hygon_cpu()) { ++ if (sev_read_file_base64(plat_cert, &s->remote_plat_cert, ++ &s->remote_plat_cert_len) < 0) { ++ goto error; ++ } ++ } else { ++ s->remote_plat_cert = g_base64_decode(plat_cert, ++ &s->remote_plat_cert_len); ++ } + if (!check_blob_length(s->remote_plat_cert_len)) { + goto error; + } + +- s->amd_cert = g_base64_decode(amd_cert, &s->amd_cert_len); ++ if (is_hygon_cpu()) { ++ if (sev_read_file_base64(amd_cert, &s->amd_cert, ++ &s->amd_cert_len) < 0) { ++ goto error; ++ } ++ } else { ++ s->amd_cert = g_base64_decode(amd_cert, &s->amd_cert_len); ++ } + if (!check_blob_length(s->amd_cert_len)) { + goto error; + } +-- +2.31.1 + diff --git a/0028-anolis-target-i386-csv-add-support-to-queue-the-outg.patch b/0028-anolis-target-i386-csv-add-support-to-queue-the-outg.patch new file mode 100644 index 0000000..f3f8862 --- /dev/null +++ b/0028-anolis-target-i386-csv-add-support-to-queue-the-outg.patch @@ -0,0 +1,270 @@ +From 66b28749c2b4625fd3226de8e2168b6b56f3b106 Mon Sep 17 00:00:00 2001 +From: fangbaoshun +Date: Mon, 2 Aug 2021 11:00:07 +0800 +Subject: [PATCH 19/28] anolis: target/i386: csv: add support to queue the + outgoing page into a list + +The csv_queue_outgoing_page() provide the implementation to queue the +guest private pages during transmission. 
The routines queues the outgoing +pages into a listi, and then issues the KVM_CSV_COMMAND_BATCH command to +encrypt the pages togather before writing them to the socket. + +Signed-off-by: hanliyang +--- + include/exec/confidential-guest-support.h | 3 + + linux-headers/linux/kvm.h | 6 + + target/i386/sev.c | 172 ++++++++++++++++++++++ + target/i386/sev.h | 2 + + 4 files changed, 183 insertions(+) + +diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h +index 343f686fc..1f32519d4 100644 +--- a/include/exec/confidential-guest-support.h ++++ b/include/exec/confidential-guest-support.h +@@ -77,6 +77,9 @@ struct ConfidentialGuestMemoryEncryptionOps { + + /* Load the shared regions list */ + int (*load_incoming_shared_regions_list)(QEMUFile *f); ++ ++ /* Queue the encrypted page and metadata associated with it into a list */ ++ int (*queue_outgoing_page)(uint8_t *ptr, uint32_t size, uint64_t addr); + }; + + typedef struct ConfidentialGuestSupportClass { +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index 922086904..58ab42c51 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -2005,6 +2005,12 @@ struct kvm_csv_init_data { + __u64 nodemask; + }; + ++struct kvm_csv_batch_list_node { ++ __u64 cmd_data_addr; ++ __u64 addr; ++ __u64 next_cmd_addr; ++}; ++ + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) + #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) + #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 1dcf17fb2..8577c164e 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -50,6 +50,15 @@ struct shared_region { + QTAILQ_ENTRY(shared_region) list; + }; + ++typedef struct CsvBatchCmdList CsvBatchCmdList; ++typedef void (*CsvDestroyCmdNodeFn) (void *data); ++ ++struct CsvBatchCmdList { ++ struct kvm_csv_batch_list_node *head; ++ struct kvm_csv_batch_list_node *tail; ++ CsvDestroyCmdNodeFn destroy_fn; ++}; ++ + extern struct sev_ops 
sev_ops; + + /** +@@ -96,6 +105,9 @@ struct SevGuestState { + bool reset_data_valid; + + QTAILQ_HEAD(, shared_region) shared_regions_list; ++ ++ /* link list used for HYGON CSV */ ++ CsvBatchCmdList *csv_batch_cmd_list; + }; + + #define DEFAULT_GUEST_POLICY 0x1 /* disable debug */ +@@ -188,6 +200,7 @@ static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .is_gfn_in_unshared_region = sev_is_gfn_in_unshared_region, + .save_outgoing_shared_regions_list = sev_save_outgoing_shared_regions_list, + .load_incoming_shared_regions_list = sev_load_incoming_shared_regions_list, ++ .queue_outgoing_page = csv_queue_outgoing_page, + }; + + static int +@@ -1869,6 +1882,165 @@ bool sev_is_gfn_in_unshared_region(unsigned long gfn) + return true; + } + ++#include "csv.h" ++ ++static CsvBatchCmdList * ++csv_batch_cmd_list_create(struct kvm_csv_batch_list_node *head, ++ CsvDestroyCmdNodeFn func) ++{ ++ CsvBatchCmdList *csv_batch_cmd_list = ++ g_malloc0(sizeof(*csv_batch_cmd_list)); ++ ++ if (!csv_batch_cmd_list) { ++ return NULL; ++ } ++ ++ csv_batch_cmd_list->head = head; ++ csv_batch_cmd_list->tail = head; ++ csv_batch_cmd_list->destroy_fn = func; ++ ++ return csv_batch_cmd_list; ++} ++ ++static int ++csv_batch_cmd_list_add_after(CsvBatchCmdList *list, ++ struct kvm_csv_batch_list_node *new_node) ++{ ++ list->tail->next_cmd_addr = (__u64)new_node; ++ list->tail = new_node; ++ ++ return 0; ++} ++ ++static struct kvm_csv_batch_list_node * ++csv_batch_cmd_list_node_create(uint64_t cmd_data_addr, uint64_t addr) ++{ ++ struct kvm_csv_batch_list_node *new_node = ++ g_malloc0(sizeof(struct kvm_csv_batch_list_node)); ++ ++ if (!new_node) { ++ return NULL; ++ } ++ ++ new_node->cmd_data_addr = cmd_data_addr; ++ new_node->addr = addr; ++ new_node->next_cmd_addr = 0; ++ ++ return new_node; ++} ++ ++static int csv_batch_cmd_list_destroy(CsvBatchCmdList *list) ++{ ++ struct kvm_csv_batch_list_node *node = list->head; ++ ++ while (node != NULL) { ++ if (list->destroy_fn 
!= NULL) ++ list->destroy_fn((void *)node->cmd_data_addr); ++ ++ list->head = (struct kvm_csv_batch_list_node *)node->next_cmd_addr; ++ g_free(node); ++ node = list->head; ++ } ++ ++ g_free(list); ++ return 0; ++} ++ ++static void send_update_data_free(void *data) ++{ ++ struct kvm_sev_send_update_data *update = ++ (struct kvm_sev_send_update_data *)data; ++ g_free((guchar *)update->hdr_uaddr); ++ g_free((guchar *)update->trans_uaddr); ++ g_free(update); ++} ++ ++static int ++csv_send_queue_data(SevGuestState *s, uint8_t *ptr, ++ uint32_t size, uint64_t addr) ++{ ++ int ret = 0; ++ int fw_error; ++ guchar *trans; ++ guchar *packet_hdr; ++ struct kvm_sev_send_update_data *update; ++ struct kvm_csv_batch_list_node *new_node = NULL; ++ ++ /* If this is first call then query the packet header bytes and allocate ++ * the packet buffer. ++ */ ++ if (s->send_packet_hdr_len < 1) { ++ s->send_packet_hdr_len = sev_send_get_packet_len(&fw_error); ++ if (s->send_packet_hdr_len < 1) { ++ error_report("%s: SEND_UPDATE fw_error=%d '%s'", ++ __func__, fw_error, fw_error_to_str(fw_error)); ++ return 1; ++ } ++ } ++ ++ packet_hdr = g_new(guchar, s->send_packet_hdr_len); ++ memset(packet_hdr, 0, s->send_packet_hdr_len); ++ ++ update = g_new0(struct kvm_sev_send_update_data, 1); ++ ++ /* allocate transport buffer */ ++ trans = g_new(guchar, size); ++ ++ update->hdr_uaddr = (unsigned long)packet_hdr; ++ update->hdr_len = s->send_packet_hdr_len; ++ update->guest_uaddr = (unsigned long)ptr; ++ update->guest_len = size; ++ update->trans_uaddr = (unsigned long)trans; ++ update->trans_len = size; ++ ++ new_node = csv_batch_cmd_list_node_create((uint64_t)update, addr); ++ if (!new_node) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ if (s->csv_batch_cmd_list == NULL) { ++ s->csv_batch_cmd_list = csv_batch_cmd_list_create(new_node, ++ send_update_data_free); ++ if (s->csv_batch_cmd_list == NULL) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ } else { ++ /* Add new_node's command address to the 
last_node */ ++ csv_batch_cmd_list_add_after(s->csv_batch_cmd_list, new_node); ++ } ++ ++ trace_kvm_sev_send_update_data(ptr, trans, size); ++ ++ return ret; ++ ++err: ++ g_free(trans); ++ g_free(update); ++ g_free(packet_hdr); ++ g_free(new_node); ++ if (s->csv_batch_cmd_list) { ++ csv_batch_cmd_list_destroy(s->csv_batch_cmd_list); ++ s->csv_batch_cmd_list = NULL; ++ } ++ return ret; ++} ++ ++int ++csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr) ++{ ++ SevGuestState *s = sev_guest; ++ ++ /* Only support for HYGON CSV */ ++ if (!is_hygon_cpu()) { ++ error_report("Only support enqueue pages for HYGON CSV"); ++ return -EINVAL; ++ } ++ ++ return csv_send_queue_data(s, ptr, sz, addr); ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index a76746a9c..d86a546d0 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -74,6 +74,8 @@ void sev_del_migrate_blocker(void); + + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + ++int csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr); ++ + struct sev_ops { + int (*sev_ioctl)(int fd, int cmd, void *data, int *error); + const char *(*fw_error_to_str)(int code); +-- +2.31.1 + diff --git a/0029-anolis-target-i386-csv-add-support-to-encrypt-the-ou.patch b/0029-anolis-target-i386-csv-add-support-to-encrypt-the-ou.patch new file mode 100644 index 0000000..35175bc --- /dev/null +++ b/0029-anolis-target-i386-csv-add-support-to-encrypt-the-ou.patch @@ -0,0 +1,204 @@ +From 6360c40637e28de387e29413d4bba35b4674c2ed Mon Sep 17 00:00:00 2001 +From: fangbaoshun +Date: Mon, 2 Aug 2021 11:41:58 +0800 +Subject: [PATCH 20/28] anolis: target/i386: csv: add support to encrypt the + outgoing pages in the list queued before. 
+ +The csv_save_queued_outgoing_pages() provide the implementation to encrypt +the guest private pages during transmission. The routines uses SEND_START +command to create the outgoing encryption context on the first call then +uses COMMAND_BATCH command to send the SEND_UPDATE_DATA commands queued +in the list to encrypt the data before writing it to the socket. While +encrypting the data SEND_UPDATE_DATA produces some metadata (e.g MAC, IV). +The metadata is also sent to the target machine. After migration is completed, +we issue the SEND_FINISH command to transition the SEV guest state from sending +to unrunnable state. + +Signed-off-by: hanliyang +--- + include/exec/confidential-guest-support.h | 4 + + linux-headers/linux/kvm.h | 8 ++ + target/i386/sev.c | 89 +++++++++++++++++++++++ + target/i386/sev.h | 4 + + 4 files changed, 105 insertions(+) + +diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h +index 1f32519d4..1ff4823b6 100644 +--- a/include/exec/confidential-guest-support.h ++++ b/include/exec/confidential-guest-support.h +@@ -80,6 +80,10 @@ struct ConfidentialGuestMemoryEncryptionOps { + + /* Queue the encrypted page and metadata associated with it into a list */ + int (*queue_outgoing_page)(uint8_t *ptr, uint32_t size, uint64_t addr); ++ ++ /* Write the list queued with encrypted pages and metadata associated ++ * with them */ ++ int (*save_queued_outgoing_pages)(QEMUFile *f, uint64_t *bytes_sent); + }; + + typedef struct ConfidentialGuestSupportClass { +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index 58ab42c51..b90f4eb31 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -1890,6 +1890,9 @@ enum sev_cmd_id { + /* Guest Migration Extension */ + KVM_SEV_SEND_CANCEL, + ++ /* Hygon CSV batch command */ ++ KVM_CSV_COMMAND_BATCH = 0x18, ++ + KVM_SEV_NR_MAX, + }; + +@@ -2011,6 +2014,11 @@ struct kvm_csv_batch_list_node { + __u64 next_cmd_addr; + }; + ++struct 
kvm_csv_command_batch { ++ __u32 command_id; ++ __u64 csv_batch_list_uaddr; ++}; ++ + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) + #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) + #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 8577c164e..f0d790b49 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -201,6 +201,7 @@ static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .save_outgoing_shared_regions_list = sev_save_outgoing_shared_regions_list, + .load_incoming_shared_regions_list = sev_load_incoming_shared_regions_list, + .queue_outgoing_page = csv_queue_outgoing_page, ++ .save_queued_outgoing_pages = csv_save_queued_outgoing_pages, + }; + + static int +@@ -2027,6 +2028,70 @@ err: + return ret; + } + ++static int ++csv_command_batch(uint32_t cmd_id, uint64_t head_uaddr, int *fw_err) ++{ ++ int ret; ++ struct kvm_csv_command_batch command_batch = { }; ++ ++ command_batch.command_id = cmd_id; ++ command_batch.csv_batch_list_uaddr = head_uaddr; ++ ++ ret = sev_ioctl(sev_guest->sev_fd, KVM_CSV_COMMAND_BATCH, ++ &command_batch, fw_err); ++ if (ret) { ++ error_report("%s: COMMAND_BATCH ret=%d fw_err=%d '%s'", ++ __func__, ret, *fw_err, fw_error_to_str(*fw_err)); ++ } ++ ++ return ret; ++} ++ ++static int ++csv_send_update_data_batch(SevGuestState *s, QEMUFile *f, uint64_t *bytes_sent) ++{ ++ int ret, fw_error = 0; ++ struct kvm_sev_send_update_data *update; ++ struct kvm_csv_batch_list_node *node; ++ ++ ret = csv_command_batch(KVM_SEV_SEND_UPDATE_DATA, ++ (uint64_t)s->csv_batch_cmd_list->head, &fw_error); ++ if (ret) { ++ error_report("%s: csv_command_batch ret=%d fw_error=%d '%s'", ++ __func__, ret, fw_error, fw_error_to_str(fw_error)); ++ goto err; ++ } ++ ++ *bytes_sent = 0; ++ for (node = s->csv_batch_cmd_list->head; ++ node != NULL; ++ node = (struct kvm_csv_batch_list_node *)node->next_cmd_addr) { ++ if (node != s->csv_batch_cmd_list->head) { ++ /* head's page header is saved 
before send_update_data */ ++ qemu_put_be64(f, node->addr); ++ *bytes_sent += 8; ++ if (node->next_cmd_addr != 0) ++ qemu_put_be32(f, RAM_SAVE_ENCRYPTED_PAGE_BATCH); ++ else ++ qemu_put_be32(f, RAM_SAVE_ENCRYPTED_PAGE_BATCH_END); ++ *bytes_sent += 4; ++ } ++ update = (struct kvm_sev_send_update_data *)node->cmd_data_addr; ++ qemu_put_be32(f, update->hdr_len); ++ qemu_put_buffer(f, (uint8_t *)update->hdr_uaddr, update->hdr_len); ++ *bytes_sent += (4 + update->hdr_len); ++ ++ qemu_put_be32(f, update->trans_len); ++ qemu_put_buffer(f, (uint8_t *)update->trans_uaddr, update->trans_len); ++ *bytes_sent += (4 + update->trans_len); ++ } ++ ++err: ++ csv_batch_cmd_list_destroy(s->csv_batch_cmd_list); ++ s->csv_batch_cmd_list = NULL; ++ return ret; ++} ++ + int + csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr) + { +@@ -2041,6 +2106,30 @@ csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr) + return csv_send_queue_data(s, ptr, sz, addr); + } + ++int ++csv_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent) ++{ ++ SevGuestState *s = sev_guest; ++ ++ /* Only support for HYGON CSV */ ++ if (!is_hygon_cpu()) { ++ error_report("Only support transfer queued pages for HYGON CSV"); ++ return -EINVAL; ++ } ++ ++ /* ++ * If this is a first buffer then create outgoing encryption context ++ * and write our PDH, policy and session data. 
++ */ ++ if (!sev_check_state(s, SEV_STATE_SEND_UPDATE) && ++ sev_send_start(s, f, bytes_sent)) { ++ error_report("Failed to create outgoing context"); ++ return 1; ++ } ++ ++ return csv_send_update_data_batch(s, f, bytes_sent); ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index d86a546d0..6b4f13225 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -41,6 +41,9 @@ typedef struct SevKernelLoaderContext { + #define RAM_SAVE_ENCRYPTED_PAGE 0x1 + #define RAM_SAVE_SHARED_REGIONS_LIST 0x2 + ++#define RAM_SAVE_ENCRYPTED_PAGE_BATCH 0x4 ++#define RAM_SAVE_ENCRYPTED_PAGE_BATCH_END 0x5 ++ + #ifdef CONFIG_SEV + bool sev_enabled(void); + bool sev_es_enabled(void); +@@ -75,6 +78,7 @@ void sev_del_migrate_blocker(void); + int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + + int csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr); ++int csv_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent); + + struct sev_ops { + int (*sev_ioctl)(int fd, int cmd, void *data, int *error); +-- +2.31.1 + diff --git a/0030-anolis-target-i386-csv-add-support-to-queue-the-inco.patch b/0030-anolis-target-i386-csv-add-support-to-queue-the-inco.patch new file mode 100644 index 0000000..d845c3a --- /dev/null +++ b/0030-anolis-target-i386-csv-add-support-to-queue-the-inco.patch @@ -0,0 +1,171 @@ +From 4c63c5ec2c3d8c7eb824760e5cbcd18c950c05f1 Mon Sep 17 00:00:00 2001 +From: fangbaoshun +Date: Mon, 2 Aug 2021 13:49:48 +0800 +Subject: [PATCH 21/28] anolis: target/i386: csv: add support to queue the + incoming page into a list + +The csv_queue_incoming_page() provide the implementation to queue the +guest private pages during transmission. 
The routines queues the incoming +socket which contains the guest private pages into a list then uses the +COMMAND_BATCH command to load the encrypted pages into the guest memory. + +Signed-off-by: hanliyang +--- + include/exec/confidential-guest-support.h | 3 + + target/i386/sev.c | 92 +++++++++++++++++++++++ + target/i386/sev.h | 1 + + 3 files changed, 96 insertions(+) + +diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h +index 1ff4823b6..530b5057b 100644 +--- a/include/exec/confidential-guest-support.h ++++ b/include/exec/confidential-guest-support.h +@@ -84,6 +84,9 @@ struct ConfidentialGuestMemoryEncryptionOps { + /* Write the list queued with encrypted pages and metadata associated + * with them */ + int (*save_queued_outgoing_pages)(QEMUFile *f, uint64_t *bytes_sent); ++ ++ /* Queue the incoming encrypted page into a list */ ++ int (*queue_incoming_page)(QEMUFile *f, uint8_t *ptr); + }; + + typedef struct ConfidentialGuestSupportClass { +diff --git a/target/i386/sev.c b/target/i386/sev.c +index f0d790b49..3088cc08e 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -202,6 +202,7 @@ static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .load_incoming_shared_regions_list = sev_load_incoming_shared_regions_list, + .queue_outgoing_page = csv_queue_outgoing_page, + .save_queued_outgoing_pages = csv_save_queued_outgoing_pages, ++ .queue_incoming_page = csv_queue_incoming_page, + }; + + static int +@@ -1956,6 +1957,15 @@ static void send_update_data_free(void *data) + g_free(update); + } + ++static void receive_update_data_free(void *data) ++{ ++ struct kvm_sev_receive_update_data *update = ++ (struct kvm_sev_receive_update_data *)data; ++ g_free((guchar *)update->hdr_uaddr); ++ g_free((guchar *)update->trans_uaddr); ++ g_free(update); ++} ++ + static int + csv_send_queue_data(SevGuestState *s, uint8_t *ptr, + uint32_t size, uint64_t addr) +@@ -2028,6 +2038,66 @@ err: + return 
ret; + } + ++static int ++csv_receive_queue_data(SevGuestState *s, QEMUFile *f, uint8_t *ptr) ++{ ++ int ret = 0; ++ gchar *hdr = NULL, *trans = NULL; ++ struct kvm_sev_receive_update_data *update; ++ struct kvm_csv_batch_list_node *new_node = NULL; ++ ++ update = g_new0(struct kvm_sev_receive_update_data, 1); ++ /* get packet header */ ++ update->hdr_len = qemu_get_be32(f); ++ hdr = g_new(gchar, update->hdr_len); ++ qemu_get_buffer(f, (uint8_t *)hdr, update->hdr_len); ++ update->hdr_uaddr = (unsigned long)hdr; ++ ++ /* get transport buffer */ ++ update->trans_len = qemu_get_be32(f); ++ trans = g_new(gchar, update->trans_len); ++ update->trans_uaddr = (unsigned long)trans; ++ qemu_get_buffer(f, (uint8_t *)update->trans_uaddr, update->trans_len); ++ ++ /* set guest address,guest len is page_size */ ++ update->guest_uaddr = (uint64_t)ptr; ++ update->guest_len = TARGET_PAGE_SIZE; ++ ++ new_node = csv_batch_cmd_list_node_create((uint64_t)update, 0); ++ if (!new_node) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ if (s->csv_batch_cmd_list == NULL) { ++ s->csv_batch_cmd_list = csv_batch_cmd_list_create(new_node, ++ receive_update_data_free); ++ if (s->csv_batch_cmd_list == NULL) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ } else { ++ /* Add new_node's command address to the last_node */ ++ csv_batch_cmd_list_add_after(s->csv_batch_cmd_list, new_node); ++ } ++ ++ trace_kvm_sev_receive_update_data(trans, (void *)ptr, update->guest_len, ++ (void *)hdr, update->hdr_len); ++ ++ return ret; ++ ++err: ++ g_free(trans); ++ g_free(update); ++ g_free(hdr); ++ g_free(new_node); ++ if (s->csv_batch_cmd_list) { ++ csv_batch_cmd_list_destroy(s->csv_batch_cmd_list); ++ s->csv_batch_cmd_list = NULL; ++ } ++ return ret; ++} ++ + static int + csv_command_batch(uint32_t cmd_id, uint64_t head_uaddr, int *fw_err) + { +@@ -2106,6 +2176,28 @@ csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr) + return csv_send_queue_data(s, ptr, sz, addr); + } + ++int csv_queue_incoming_page(QEMUFile 
*f, uint8_t *ptr) ++{ ++ SevGuestState *s = sev_guest; ++ ++ /* Only support for HYGON CSV */ ++ if (!is_hygon_cpu()) { ++ error_report("Only support enqueue received pages for HYGON CSV"); ++ return -EINVAL; ++ } ++ ++ /* ++ * If this is first buffer and SEV is not in recieiving state then ++ * use RECEIVE_START command to create a encryption context. ++ */ ++ if (!sev_check_state(s, SEV_STATE_RECEIVE_UPDATE) && ++ sev_receive_start(s, f)) { ++ return 1; ++ } ++ ++ return csv_receive_queue_data(s, f, ptr); ++} ++ + int + csv_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent) + { +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 6b4f13225..6d8332f83 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -79,6 +79,7 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + + int csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr); + int csv_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent); ++int csv_queue_incoming_page(QEMUFile *f, uint8_t *ptr); + + struct sev_ops { + int (*sev_ioctl)(int fd, int cmd, void *data, int *error); +-- +2.31.1 + diff --git a/0031-anolis-target-i386-csv-add-support-to-load-incoming-.patch b/0031-anolis-target-i386-csv-add-support-to-load-incoming-.patch new file mode 100644 index 0000000..b5ea271 --- /dev/null +++ b/0031-anolis-target-i386-csv-add-support-to-load-incoming-.patch @@ -0,0 +1,108 @@ +From 7b1673952df3b0dd0361b3ae1bb2aa3004f17bd9 Mon Sep 17 00:00:00 2001 +From: fangbaoshun +Date: Mon, 2 Aug 2021 14:11:43 +0800 +Subject: [PATCH 22/28] anolis: target/i386: csv: add support to load incoming + encrypted pages queued in the CMD list + +The csv_load_queued_incoming_pages() provide the implementation to read the +incoming guest private pages from the socket queued in the CMD list and load +them into the guest memory. 
The routines uses the RECEIVE_START command to +create the incoming encryption context on the first call then uses the +COMMAND_BATCH carried with RECEIEVE_UPDATE_DATA commands to load the encrypted +pages into the guest memory. After migration is completed, we issue the +RECEIVE_FINISH command to transition the SEV guest to the runnable state +so that it can be executed. + +Signed-off-by: hanliyang +--- + include/exec/confidential-guest-support.h | 3 +++ + target/i386/sev.c | 32 +++++++++++++++++++++++ + target/i386/sev.h | 1 + + 3 files changed, 36 insertions(+) + +diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h +index 530b5057b..f6a30fab3 100644 +--- a/include/exec/confidential-guest-support.h ++++ b/include/exec/confidential-guest-support.h +@@ -87,6 +87,9 @@ struct ConfidentialGuestMemoryEncryptionOps { + + /* Queue the incoming encrypted page into a list */ + int (*queue_incoming_page)(QEMUFile *f, uint8_t *ptr); ++ ++ /* Load the incoming encrypted pages queued in list into guest memory */ ++ int (*load_queued_incoming_pages)(QEMUFile *f); + }; + + typedef struct ConfidentialGuestSupportClass { +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 3088cc08e..e33263076 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -203,6 +203,7 @@ static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .queue_outgoing_page = csv_queue_outgoing_page, + .save_queued_outgoing_pages = csv_save_queued_outgoing_pages, + .queue_incoming_page = csv_queue_incoming_page, ++ .load_queued_incoming_pages = csv_load_queued_incoming_pages, + }; + + static int +@@ -2162,6 +2163,24 @@ err: + return ret; + } + ++static int ++csv_receive_update_data_batch(SevGuestState *s) ++{ ++ int ret; ++ int fw_error; ++ ++ ret = csv_command_batch(KVM_SEV_RECEIVE_UPDATE_DATA, ++ (uint64_t)s->csv_batch_cmd_list->head, &fw_error); ++ if (ret) { ++ error_report("%s: csv_command_batch ret=%d fw_error=%d '%s'", ++ 
__func__, ret, fw_error, fw_error_to_str(fw_error)); ++ } ++ ++ csv_batch_cmd_list_destroy(s->csv_batch_cmd_list); ++ s->csv_batch_cmd_list = NULL; ++ return ret; ++} ++ + int + csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr) + { +@@ -2222,6 +2241,19 @@ csv_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent) + return csv_send_update_data_batch(s, f, bytes_sent); + } + ++int csv_load_queued_incoming_pages(QEMUFile *f) ++{ ++ SevGuestState *s = sev_guest; ++ ++ /* Only support for HYGON CSV */ ++ if (!is_hygon_cpu()) { ++ error_report("Only support load queued pages for HYGON CSV"); ++ return -EINVAL; ++ } ++ ++ return csv_receive_update_data_batch(s); ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 6d8332f83..291046514 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -80,6 +80,7 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + int csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr); + int csv_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent); + int csv_queue_incoming_page(QEMUFile *f, uint8_t *ptr); ++int csv_load_queued_incoming_pages(QEMUFile *f); + + struct sev_ops { + int (*sev_ioctl)(int fd, int cmd, void *data, int *error); +-- +2.31.1 + diff --git a/0032-anolis-migration-ram-Accelerate-the-transmission-of-.patch b/0032-anolis-migration-ram-Accelerate-the-transmission-of-.patch new file mode 100644 index 0000000..d85f2e7 --- /dev/null +++ b/0032-anolis-migration-ram-Accelerate-the-transmission-of-.patch @@ -0,0 +1,229 @@ +From f5e1ce379e7757accbc3dbca64edc443587b758f Mon Sep 17 00:00:00 2001 +From: fangbaoshun +Date: Mon, 2 Aug 2021 14:35:51 +0800 +Subject: [PATCH 23/28] anolis: migration/ram: Accelerate the transmission of + CSV guest's encrypted pages + +When memory encryption is enabled, the guest memory 
will be encrypted with +the guest specific key. The patch introduces an accelerate solution which +queued the pages into list and send them togather by COMMAND_BATCH. + +Signed-off-by: hanliyang +--- + configs/devices/i386-softmmu/default.mak | 1 + + hw/i386/Kconfig | 5 ++ + migration/ram.c | 106 +++++++++++++++++++++++ + target/i386/csv.h | 11 +++ + target/i386/sev.h | 1 + + 5 files changed, 124 insertions(+) + +diff --git a/configs/devices/i386-softmmu/default.mak b/configs/devices/i386-softmmu/default.mak +index db83ffcab..e948e54e4 100644 +--- a/configs/devices/i386-softmmu/default.mak ++++ b/configs/devices/i386-softmmu/default.mak +@@ -24,6 +24,7 @@ + #CONFIG_VTD=n + #CONFIG_SGX=n + #CONFIG_CSV=n ++#CONFIG_HYGON_CSV_MIG_ACCEL=n + + # Boards: + # +diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig +index ed35d762b..8876b360b 100644 +--- a/hw/i386/Kconfig ++++ b/hw/i386/Kconfig +@@ -4,6 +4,7 @@ config X86_FW_OVMF + config SEV + bool + select X86_FW_OVMF ++ select HYGON_CSV_MIG_ACCEL + depends on KVM + + config SGX +@@ -14,6 +15,10 @@ config CSV + bool + depends on SEV + ++config HYGON_CSV_MIG_ACCEL ++ bool ++ depends on SEV ++ + config PC + bool + imply APPLESMC +diff --git a/migration/ram.c b/migration/ram.c +index 4914474b2..e93bd310f 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -61,6 +61,7 @@ + + /* Defines RAM_SAVE_ENCRYPTED_PAGE and RAM_SAVE_SHARED_REGION_LIST */ + #include "target/i386/sev.h" ++#include "target/i386/csv.h" + #include "sysemu/kvm.h" + + #include "hw/boards.h" /* for machine_dump_guest_core() */ +@@ -2595,6 +2596,99 @@ static void postcopy_preempt_reset_channel(RAMState *rs) + } + } + ++#ifdef CONFIG_HYGON_CSV_MIG_ACCEL ++/** ++ * ram_save_encrypted_pages_in_batch - send the given encrypted pages to ++ * the stream. 
++ */ ++static int ++ram_save_encrypted_pages_in_batch(RAMState *rs, PageSearchStatus *pss) ++{ ++ int ret; ++ int tmppages = 0, pages = 0; ++ RAMBlock *block = pss->block; ++ ram_addr_t offset = pss->page << TARGET_PAGE_BITS; ++ ram_addr_t start_offset = 0; ++ uint32_t host_len = 0; ++ uint8_t *p; ++ uint64_t bytes_xmit; ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *)object_get_class(OBJECT(ms->cgs)); ++ struct ConfidentialGuestMemoryEncryptionOps *ops = ++ cgs_class->memory_encryption_ops; ++ ++ do { ++ /* Check the pages is dirty and if it is send it */ ++ if (!migration_bitmap_clear_dirty(rs, block, pss->page)) { ++ pss->page++; ++ continue; ++ } ++ ++ /* Process the unencrypted page */ ++ if (!encrypted_test_list(rs, block, pss->page)) { ++ tmppages = ram_save_target_page(rs, pss); ++ } else { ++ /* Caculate the offset and host virtual address of the page */ ++ offset = pss->page << TARGET_PAGE_BITS; ++ p = block->host + offset; ++ ++ /* Record the offset and host virtual address of the first ++ * page in this loop which will be used below. ++ */ ++ if (host_len == 0) { ++ start_offset = offset | RAM_SAVE_FLAG_ENCRYPTED_DATA; ++ } else { ++ offset |= (RAM_SAVE_FLAG_ENCRYPTED_DATA | RAM_SAVE_FLAG_CONTINUE); ++ } ++ ++ /* Queue the outgoing page if the page is not zero page. ++ * If the queued pages are up to the outgoing page window size, ++ * process them below. 
++ */ ++ if (ops->queue_outgoing_page(p, TARGET_PAGE_SIZE, offset)) ++ return -1; ++ ++ tmppages = 1; ++ host_len += TARGET_PAGE_SIZE; ++ ram_counters.normal++; ++ } ++ ++ if (tmppages < 0) { ++ return tmppages; ++ } ++ ++ pages += tmppages; ++ ++ pss->page++; ++ } while (offset_in_ramblock(block, pss->page << TARGET_PAGE_BITS) && ++ host_len < CSV_OUTGOING_PAGE_WINDOW_SIZE); ++ ++ /* Check if there are any queued pages */ ++ if (host_len != 0) { ++ ram_counters.transferred += ++ save_page_header(rs, rs->f, block, start_offset); ++ /* if only one page queued, flag is BATCH_END, else flag is BATCH */ ++ if (host_len > TARGET_PAGE_SIZE) ++ qemu_put_be32(rs->f, RAM_SAVE_ENCRYPTED_PAGE_BATCH); ++ else ++ qemu_put_be32(rs->f, RAM_SAVE_ENCRYPTED_PAGE_BATCH_END); ++ ram_counters.transferred += 4; ++ /* Process the queued pages in batch */ ++ ret = ops->save_queued_outgoing_pages(rs->f, &bytes_xmit); ++ if (ret) { ++ return -1; ++ } ++ ram_counters.transferred += bytes_xmit; ++ } ++ ++ /* The offset we leave with is the last one we looked at */ ++ pss->page--; ++ ++ return pages; ++} ++#endif ++ + /** + * ram_save_host_page: save a whole host page + * +@@ -2630,6 +2724,18 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss) + postcopy_preempt_choose_channel(rs, pss); + } + ++#ifdef CONFIG_HYGON_CSV_MIG_ACCEL ++ /* ++ * If command_batch function is enabled and memory encryption is enabled ++ * then use command batch APIs to accelerate the sending process ++ * to write the outgoing buffer to the wire. The encryption APIs ++ * will re-encrypt the data with transport key so that data is prototect ++ * on the wire. 
++ */ ++ if (memcrypt_enabled() && is_hygon_cpu() && !migration_in_postcopy()) ++ return ram_save_encrypted_pages_in_batch(rs, pss); ++#endif ++ + do { + if (postcopy_needs_preempt(rs, pss)) { + postcopy_do_preempt(rs, pss); +diff --git a/target/i386/csv.h b/target/i386/csv.h +index c2bee16e9..986bffd95 100644 +--- a/target/i386/csv.h ++++ b/target/i386/csv.h +@@ -16,6 +16,8 @@ + + #include "qapi/qapi-commands-misc-target.h" + ++#ifdef CONFIG_SEV ++ + #include "cpu.h" + + #define CPUID_VENDOR_HYGON_EBX 0x6f677948 /* "Hygo" */ +@@ -38,6 +40,15 @@ static __attribute__((unused)) bool is_hygon_cpu(void) + return false; + } + ++#else ++ ++static __attribute__((unused)) bool is_hygon_cpu(void) ++{ ++ return false; ++} ++ ++#endif ++ + #ifdef CONFIG_CSV + bool csv_enabled(void); + #else +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 291046514..48f8b34ca 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -43,6 +43,7 @@ typedef struct SevKernelLoaderContext { + + #define RAM_SAVE_ENCRYPTED_PAGE_BATCH 0x4 + #define RAM_SAVE_ENCRYPTED_PAGE_BATCH_END 0x5 ++#define CSV_OUTGOING_PAGE_WINDOW_SIZE (4094 * TARGET_PAGE_SIZE) + + #ifdef CONFIG_SEV + bool sev_enabled(void); +-- +2.31.1 + diff --git a/0033-anolis-migration-ram-Accelerate-the-loading-of-CSV-g.patch b/0033-anolis-migration-ram-Accelerate-the-loading-of-CSV-g.patch new file mode 100644 index 0000000..8efc4da --- /dev/null +++ b/0033-anolis-migration-ram-Accelerate-the-loading-of-CSV-g.patch @@ -0,0 +1,37 @@ +From f856ec7c4abed4c42ad1068016180704a177c0f7 Mon Sep 17 00:00:00 2001 +From: fangbaoshun +Date: Mon, 2 Aug 2021 14:49:45 +0800 +Subject: [PATCH 24/28] anolis: migration/ram: Accelerate the loading of CSV + guest's encrypted pages + +When memory encryption is enabled, the guest memory will be encrypted with +the guest specific key. The patch introduces an accelerate solution which +queued the pages into list and load them togather by COMMAND_BATCH. 
+ +Signed-off-by: hanliyang +--- + migration/ram.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/migration/ram.c b/migration/ram.c +index e93bd310f..1cec031df 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -1442,6 +1442,14 @@ static int load_encrypted_data(QEMUFile *f, uint8_t *ptr) + return ops->load_incoming_page(f, ptr); + } else if (flag == RAM_SAVE_SHARED_REGIONS_LIST) { + return ops->load_incoming_shared_regions_list(f); ++ } else if (flag == RAM_SAVE_ENCRYPTED_PAGE_BATCH) { ++ return ops->queue_incoming_page(f, ptr); ++ } else if (flag == RAM_SAVE_ENCRYPTED_PAGE_BATCH_END) { ++ if (ops->queue_incoming_page(f, ptr)) { ++ error_report("Failed to queue incoming data"); ++ return -EINVAL; ++ } ++ return ops->load_queued_incoming_pages(f); + } else { + error_report("unknown encrypted flag %x", flag); + return 1; +-- +2.31.1 + diff --git a/0034-anolis-target-i386-csv-Add-support-for-migrate-VMSA-.patch b/0034-anolis-target-i386-csv-Add-support-for-migrate-VMSA-.patch new file mode 100644 index 0000000..b9e7fa7 --- /dev/null +++ b/0034-anolis-target-i386-csv-Add-support-for-migrate-VMSA-.patch @@ -0,0 +1,416 @@ +From f60def186755259971d3920fd7b86055f284e484 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Tue, 7 Jun 2022 15:19:32 +0800 +Subject: [PATCH 25/28] anolis: target/i386/csv: Add support for migrate VMSA + for CSV2 guest + +CSV2 can protect guest's cpu state through memory encryption. Each +vcpu has its corresponding memory, which is also called VMSA, and +is encrypted by guest's specific encrytion key. + +When CSV2 guest exit to host, the vcpu's state will be encrypted +and saved to VMSA, and the VMSA will be decrypted and loaded to cpu +when the guest's vcpu running at next time. + +If user wants to migrate one CSV2 guest to target machine, the VMSA +of the vcpus also should be migrated to target. 
CSV firmware provides +SEND_UPDATE_VMSA/RECEIVE_UPDATE_VMSA API through which VMSA can be +converted into secure data and transmitted to the remote end (for +example, network transmission). + +The migration of cpu state is identified by CPUState.cpu_index which +may not equals to vcpu id from KVM's perspective. + +When migrate the VMSA, the source QEMU will invoke SEND_UPDATE_VMSA to +generate data correspond to VMSA, after target QEMU received the data, +it will calc target vcpu id in the KVM by CPUState.cpu_index, and then +invoke RECEIVE_UPDATE_VMSA to restore VMSA correspond to vcpu. + +Signed-off-by: hanliyang +--- + include/exec/confidential-guest-support.h | 6 + + linux-headers/linux/kvm.h | 16 ++ + migration/ram.c | 30 ++++ + target/i386/sev.c | 196 ++++++++++++++++++++++ + target/i386/sev.h | 3 + + target/i386/trace-events | 2 + + 6 files changed, 253 insertions(+) + +diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h +index f6a30fab3..a069bad9d 100644 +--- a/include/exec/confidential-guest-support.h ++++ b/include/exec/confidential-guest-support.h +@@ -90,6 +90,12 @@ struct ConfidentialGuestMemoryEncryptionOps { + + /* Load the incoming encrypted pages queued in list into guest memory */ + int (*load_queued_incoming_pages)(QEMUFile *f); ++ ++ /* Write the encrypted cpu state */ ++ int (*save_outgoing_cpu_state)(QEMUFile *f); ++ ++ /* Load the encrypted cpu state */ ++ int (*load_incoming_cpu_state)(QEMUFile *f); + }; + + typedef struct ConfidentialGuestSupportClass { +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index b90f4eb31..0f0ed2e14 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -1971,6 +1971,14 @@ struct kvm_sev_send_update_data { + __u32 trans_len; + }; + ++struct kvm_sev_send_update_vmsa { ++ __u32 vcpu_id; ++ __u64 hdr_uaddr; ++ __u32 hdr_len; ++ __u64 trans_uaddr; ++ __u32 trans_len; ++}; ++ + struct kvm_sev_receive_start { + __u32 handle; + 
__u32 policy; +@@ -1989,6 +1997,14 @@ struct kvm_sev_receive_update_data { + __u32 trans_len; + }; + ++struct kvm_sev_receive_update_vmsa { ++ __u32 vcpu_id; ++ __u64 hdr_uaddr; ++ __u32 hdr_len; ++ __u64 trans_uaddr; ++ __u32 trans_len; ++}; ++ + /* CSV command */ + enum csv_cmd_id { + KVM_CSV_NR_MIN = 0xc0, +diff --git a/migration/ram.c b/migration/ram.c +index 1cec031df..9586550dc 100644 +--- a/migration/ram.c ++++ b/migration/ram.c +@@ -1426,6 +1426,23 @@ static int ram_save_shared_region_list(RAMState *rs, QEMUFile *f) + return ops->save_outgoing_shared_regions_list(rs->f); + } + ++/** ++ * ram_save_encrypted_cpu_state: send the encrypted cpu state ++ */ ++static int ram_save_encrypted_cpu_state(RAMState *rs, QEMUFile *f) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ ConfidentialGuestSupportClass *cgs_class = ++ (ConfidentialGuestSupportClass *) object_get_class(OBJECT(ms->cgs)); ++ struct ConfidentialGuestMemoryEncryptionOps *ops = ++ cgs_class->memory_encryption_ops; ++ ++ save_page_header(rs, rs->f, rs->last_seen_block, ++ RAM_SAVE_FLAG_ENCRYPTED_DATA); ++ qemu_put_be32(rs->f, RAM_SAVE_ENCRYPTED_CPU_STATE); ++ return ops->save_outgoing_cpu_state(rs->f); ++} ++ + static int load_encrypted_data(QEMUFile *f, uint8_t *ptr) + { + MachineState *ms = MACHINE(qdev_get_machine()); +@@ -1450,6 +1467,8 @@ static int load_encrypted_data(QEMUFile *f, uint8_t *ptr) + return -EINVAL; + } + return ops->load_queued_incoming_pages(f); ++ } else if (flag == RAM_SAVE_ENCRYPTED_CPU_STATE) { ++ return ops->load_incoming_cpu_state(f); + } else { + error_report("unknown encrypted flag %x", flag); + return 1; +@@ -3715,6 +3734,17 @@ static int ram_save_complete(QEMUFile *f, void *opaque) + if (memcrypt_enabled()) { + ret = ram_save_shared_region_list(rs, f); + } ++ ++ /* ++ * send the encrypted cpu state, for example, CSV2 guest's ++ * vmsa for each vcpu. 
++ */ ++ if (!ret && memcrypt_enabled() && is_hygon_cpu()) { ++ ret = ram_save_encrypted_cpu_state(rs, f); ++ if (ret) { ++ error_report("Failed to save encrypted cpu state"); ++ } ++ } + } + + if (ret < 0) { +diff --git a/target/i386/sev.c b/target/i386/sev.c +index e33263076..6051b9f6c 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -100,6 +100,10 @@ struct SevGuestState { + gchar *send_packet_hdr; + size_t send_packet_hdr_len; + ++ /* needed by live migration of HYGON CSV2 guest */ ++ gchar *send_vmsa_packet_hdr; ++ size_t send_vmsa_packet_hdr_len; ++ + uint32_t reset_cs; + uint32_t reset_ip; + bool reset_data_valid; +@@ -193,6 +197,9 @@ static const char *const sev_fw_errlist[] = { + #define SHARED_REGION_LIST_CONT 0x1 + #define SHARED_REGION_LIST_END 0x2 + ++#define ENCRYPTED_CPU_STATE_CONT 0x1 ++#define ENCRYPTED_CPU_STATE_END 0x2 ++ + static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .save_setup = sev_save_setup, + .save_outgoing_page = sev_save_outgoing_page, +@@ -204,6 +211,8 @@ static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .save_queued_outgoing_pages = csv_save_queued_outgoing_pages, + .queue_incoming_page = csv_queue_incoming_page, + .load_queued_incoming_pages = csv_load_queued_incoming_pages, ++ .save_outgoing_cpu_state = csv_save_outgoing_cpu_state, ++ .load_incoming_cpu_state = csv_load_incoming_cpu_state, + }; + + static int +@@ -1060,6 +1069,9 @@ sev_send_finish(void) + } + + g_free(sev_guest->send_packet_hdr); ++ if (sev_es_enabled() && is_hygon_cpu()) { ++ g_free(sev_guest->send_vmsa_packet_hdr); ++ } + sev_set_guest_state(sev_guest, SEV_STATE_RUNNING); + } + +@@ -2254,6 +2266,190 @@ int csv_load_queued_incoming_pages(QEMUFile *f) + return csv_receive_update_data_batch(s); + } + ++static int ++sev_send_vmsa_get_packet_len(int *fw_err) ++{ ++ int ret; ++ struct kvm_sev_send_update_vmsa update = { 0, }; ++ ++ ret = sev_ioctl(sev_guest->sev_fd, 
KVM_SEV_SEND_UPDATE_VMSA, ++ &update, fw_err); ++ if (*fw_err != SEV_RET_INVALID_LEN) { ++ ret = -1; ++ error_report("%s: failed to get session length ret=%d fw_error=%d '%s'", ++ __func__, ret, *fw_err, fw_error_to_str(*fw_err)); ++ goto err; ++ } ++ ++ ret = update.hdr_len; ++ ++err: ++ return ret; ++} ++ ++static int ++sev_send_update_vmsa(SevGuestState *s, QEMUFile *f, uint32_t cpu_id, ++ uint32_t cpu_index, uint32_t size) ++{ ++ int ret, fw_error; ++ guchar *trans = NULL; ++ struct kvm_sev_send_update_vmsa update = {}; ++ ++ /* ++ * If this is first call then query the packet header bytes and allocate ++ * the packet buffer. ++ */ ++ if (!s->send_vmsa_packet_hdr) { ++ s->send_vmsa_packet_hdr_len = sev_send_vmsa_get_packet_len(&fw_error); ++ if (s->send_vmsa_packet_hdr_len < 1) { ++ error_report("%s: SEND_UPDATE_VMSA fw_error=%d '%s'", ++ __func__, fw_error, fw_error_to_str(fw_error)); ++ return 1; ++ } ++ ++ s->send_vmsa_packet_hdr = g_new(gchar, s->send_vmsa_packet_hdr_len); ++ } ++ ++ /* allocate transport buffer */ ++ trans = g_new(guchar, size); ++ ++ update.vcpu_id = cpu_id; ++ update.hdr_uaddr = (uintptr_t)s->send_vmsa_packet_hdr; ++ update.hdr_len = s->send_vmsa_packet_hdr_len; ++ update.trans_uaddr = (uintptr_t)trans; ++ update.trans_len = size; ++ ++ trace_kvm_sev_send_update_vmsa(cpu_id, cpu_index, trans, size); ++ ++ ret = sev_ioctl(s->sev_fd, KVM_SEV_SEND_UPDATE_VMSA, &update, &fw_error); ++ if (ret) { ++ error_report("%s: SEND_UPDATE_VMSA ret=%d fw_error=%d '%s'", ++ __func__, ret, fw_error, fw_error_to_str(fw_error)); ++ goto err; ++ } ++ ++ /* ++ * Migration of vCPU's VMState according to the instance_id ++ * (i.e. 
CPUState.cpu_index) ++ */ ++ qemu_put_be32(f, sizeof(uint32_t)); ++ qemu_put_buffer(f, (uint8_t *)&cpu_index, sizeof(uint32_t)); ++ ++ qemu_put_be32(f, update.hdr_len); ++ qemu_put_buffer(f, (uint8_t *)update.hdr_uaddr, update.hdr_len); ++ ++ qemu_put_be32(f, update.trans_len); ++ qemu_put_buffer(f, (uint8_t *)update.trans_uaddr, update.trans_len); ++ ++err: ++ g_free(trans); ++ return ret; ++} ++ ++int csv_save_outgoing_cpu_state(QEMUFile *f) ++{ ++ SevGuestState *s = sev_guest; ++ CPUState *cpu; ++ int ret = 0; ++ ++ /* Only support migrate VMSAs for HYGON CSV2 guest */ ++ if (!sev_es_enabled() || !is_hygon_cpu()) { ++ return 0; ++ } ++ ++ CPU_FOREACH(cpu) { ++ qemu_put_be32(f, ENCRYPTED_CPU_STATE_CONT); ++ ret = sev_send_update_vmsa(s, f, kvm_arch_vcpu_id(cpu), ++ cpu->cpu_index, TARGET_PAGE_SIZE); ++ if (ret) { ++ goto err; ++ } ++ } ++ ++ qemu_put_be32(f, ENCRYPTED_CPU_STATE_END); ++ ++err: ++ return ret; ++} ++ ++static int sev_receive_update_vmsa(QEMUFile *f) ++{ ++ int ret = 1, fw_error = 0; ++ CPUState *cpu; ++ uint32_t cpu_index, cpu_id = 0; ++ gchar *hdr = NULL, *trans = NULL; ++ struct kvm_sev_receive_update_vmsa update = {}; ++ ++ /* get cpu index buffer */ ++ assert(qemu_get_be32(f) == sizeof(uint32_t)); ++ qemu_get_buffer(f, (uint8_t *)&cpu_index, sizeof(uint32_t)); ++ ++ CPU_FOREACH(cpu) { ++ if (cpu->cpu_index == cpu_index) { ++ cpu_id = kvm_arch_vcpu_id(cpu); ++ break; ++ } ++ } ++ update.vcpu_id = cpu_id; ++ ++ /* get packet header */ ++ update.hdr_len = qemu_get_be32(f); ++ if (!check_blob_length(update.hdr_len)) { ++ return 1; ++ } ++ ++ hdr = g_new(gchar, update.hdr_len); ++ qemu_get_buffer(f, (uint8_t *)hdr, update.hdr_len); ++ update.hdr_uaddr = (uintptr_t)hdr; ++ ++ /* get transport buffer */ ++ update.trans_len = qemu_get_be32(f); ++ if (!check_blob_length(update.trans_len)) { ++ goto err; ++ } ++ ++ trans = g_new(gchar, update.trans_len); ++ update.trans_uaddr = (uintptr_t)trans; ++ qemu_get_buffer(f, (uint8_t *)update.trans_uaddr, 
update.trans_len); ++ ++ trace_kvm_sev_receive_update_vmsa(cpu_id, cpu_index, ++ trans, update.trans_len, hdr, update.hdr_len); ++ ++ ret = sev_ioctl(sev_guest->sev_fd, KVM_SEV_RECEIVE_UPDATE_VMSA, ++ &update, &fw_error); ++ if (ret) { ++ error_report("Error RECEIVE_UPDATE_VMSA ret=%d fw_error=%d '%s'", ++ ret, fw_error, fw_error_to_str(fw_error)); ++ } ++ ++err: ++ g_free(trans); ++ g_free(hdr); ++ return ret; ++} ++ ++int csv_load_incoming_cpu_state(QEMUFile *f) ++{ ++ int status, ret = 0; ++ ++ /* Only support migrate VMSAs for HYGON CSV2 guest */ ++ if (!sev_es_enabled() || !is_hygon_cpu()) { ++ return 0; ++ } ++ ++ status = qemu_get_be32(f); ++ while (status == ENCRYPTED_CPU_STATE_CONT) { ++ ret = sev_receive_update_vmsa(f); ++ if (ret) { ++ break; ++ } ++ ++ status = qemu_get_be32(f); ++ } ++ ++ return ret; ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 48f8b34ca..f76f443f6 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -44,6 +44,7 @@ typedef struct SevKernelLoaderContext { + #define RAM_SAVE_ENCRYPTED_PAGE_BATCH 0x4 + #define RAM_SAVE_ENCRYPTED_PAGE_BATCH_END 0x5 + #define CSV_OUTGOING_PAGE_WINDOW_SIZE (4094 * TARGET_PAGE_SIZE) ++#define RAM_SAVE_ENCRYPTED_CPU_STATE 0x6 + + #ifdef CONFIG_SEV + bool sev_enabled(void); +@@ -82,6 +83,8 @@ int csv_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr); + int csv_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent); + int csv_queue_incoming_page(QEMUFile *f, uint8_t *ptr); + int csv_load_queued_incoming_pages(QEMUFile *f); ++int csv_save_outgoing_cpu_state(QEMUFile *f); ++int csv_load_incoming_cpu_state(QEMUFile *f); + + struct sev_ops { + int (*sev_ioctl)(int fd, int cmd, void *data, int *error); +diff --git a/target/i386/trace-events b/target/i386/trace-events +index e32b0319b..60a4609c0 100644 +--- 
a/target/i386/trace-events ++++ b/target/i386/trace-events +@@ -17,6 +17,8 @@ kvm_sev_send_finish(void) "" + kvm_sev_receive_start(int policy, void *session, void *pdh) "policy 0x%x session %p pdh %p" + kvm_sev_receive_update_data(void *src, void *dst, int len, void *hdr, int hdr_len) "guest %p trans %p len %d hdr %p hdr_len %d" + kvm_sev_receive_finish(void) "" ++kvm_sev_send_update_vmsa(uint32_t cpu_id, uint32_t cpu_index, void *dst, int len) "cpu_id %d cpu_index %d trans %p len %d" ++kvm_sev_receive_update_vmsa(uint32_t cpu_id, uint32_t cpu_index, void *src, int len, void *hdr, int hdr_len) "cpu_id %d cpu_index %d trans %p len %d hdr %p hdr_len %d" + + # csv.c + kvm_csv_launch_encrypt_data(uint64_t gpa, void *addr, uint64_t len) "gpa 0x%" PRIx64 "addr %p len 0x%" PRIu64 +-- +2.31.1 + diff --git a/0035-anolis-target-i386-get-set-migrate-GHCB-state.patch b/0035-anolis-target-i386-get-set-migrate-GHCB-state.patch new file mode 100644 index 0000000..9d719e7 --- /dev/null +++ b/0035-anolis-target-i386-get-set-migrate-GHCB-state.patch @@ -0,0 +1,175 @@ +From bbd2568711b714d4b8019cfce832f814273ecf8c Mon Sep 17 00:00:00 2001 +From: panpingsheng +Date: Sat, 12 Jun 2021 15:15:29 +0800 +Subject: [PATCH 26/28] anolis: target/i386: get/set/migrate GHCB state + +GHCB state is necessary to CSV2 guest when migrating to target. + +Add GHCB related definition, it also adds corresponding part +to kvm_get/put, and vmstate. 
+ +Signed-off-by: hanliyang +--- + include/sysemu/kvm.h | 1 + + linux-headers/linux/kvm.h | 1 + + target/i386/cpu.h | 4 ++++ + target/i386/kvm/kvm.c | 11 +++++++++++ + target/i386/machine.c | 24 ++++++++++++++++++++++++ + target/i386/sev.c | 10 ++++++++++ + 6 files changed, 51 insertions(+) + +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index c4ff7d9a4..d6669e204 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -46,6 +46,7 @@ extern bool kvm_readonly_mem_allowed; + extern bool kvm_direct_msi_allowed; + extern bool kvm_ioeventfd_any_length_allowed; + extern bool kvm_msi_use_devid; ++extern bool kvm_has_msr_ghcb; + + #define kvm_enabled() (kvm_allowed) + /** +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index 0f0ed2e14..0ae5cf192 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -1176,6 +1176,7 @@ struct kvm_ppc_resize_hpt { + #define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220 + #define KVM_CAP_S390_ZPCI_OP 221 + #define KVM_CAP_S390_CPU_TOPOLOGY 222 ++#define KVM_CAP_SEV_ES_GHCB 500 + + #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) + +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index d4bc19577..f17184390 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -516,6 +516,8 @@ typedef enum X86Seg { + + #define MSR_VM_HSAVE_PA 0xc0010117 + ++#define MSR_AMD64_SEV_ES_GHCB 0xc0010130 ++ + #define MSR_IA32_XFD 0x000001c4 + #define MSR_IA32_XFD_ERR 0x000001c5 + +@@ -1811,6 +1813,8 @@ typedef struct CPUArchState { + TPRAccess tpr_access_type; + + unsigned nr_dies; ++ ++ uint64_t ghcb_gpa; + } CPUX86State; + + struct kvm_msrs; +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index 31667fdde..4bf0aa58b 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -3599,6 +3599,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level) + } + } + ++ if (kvm_has_msr_ghcb) { ++ kvm_msr_entry_add(cpu, MSR_AMD64_SEV_ES_GHCB, env->ghcb_gpa); ++ } ++ + return 
kvm_buf_set_msrs(cpu); + } + +@@ -4000,6 +4004,10 @@ static int kvm_get_msrs(X86CPU *cpu) + } + } + ++ if (kvm_has_msr_ghcb) { ++ kvm_msr_entry_add(cpu, MSR_AMD64_SEV_ES_GHCB, 0); ++ } ++ + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); + if (ret < 0) { + return ret; +@@ -4320,6 +4328,9 @@ static int kvm_get_msrs(X86CPU *cpu) + case MSR_ARCH_LBR_INFO_0 ... MSR_ARCH_LBR_INFO_0 + 31: + env->lbr_records[index - MSR_ARCH_LBR_INFO_0].info = msrs[i].data; + break; ++ case MSR_AMD64_SEV_ES_GHCB: ++ env->ghcb_gpa = msrs[i].data; ++ break; + } + } + +diff --git a/target/i386/machine.c b/target/i386/machine.c +index 310b12523..e3c57f0f0 100644 +--- a/target/i386/machine.c ++++ b/target/i386/machine.c +@@ -1581,6 +1581,27 @@ static const VMStateDescription vmstate_triple_fault = { + } + }; + ++#if defined(CONFIG_KVM) && defined(TARGET_X86_64) ++static bool msr_ghcb_gpa_needed(void *opaque) ++{ ++ X86CPU *cpu = opaque; ++ CPUX86State *env = &cpu->env; ++ ++ return env->ghcb_gpa != 0; ++} ++ ++static const VMStateDescription vmstate_msr_ghcb_gpa = { ++ .name = "cpu/svm_msr_ghcb_gpa", ++ .version_id = 1, ++ .minimum_version_id = 1, ++ .needed = msr_ghcb_gpa_needed, ++ .fields = (VMStateField[]) { ++ VMSTATE_UINT64(env.ghcb_gpa, X86CPU), ++ VMSTATE_END_OF_LIST() ++ } ++}; ++#endif ++ + const VMStateDescription vmstate_x86_cpu = { + .name = "cpu", + .version_id = 12, +@@ -1726,6 +1747,9 @@ const VMStateDescription vmstate_x86_cpu = { + #endif + &vmstate_arch_lbr, + &vmstate_triple_fault, ++#if defined(CONFIG_KVM) && defined(TARGET_X86_64) ++ &vmstate_msr_ghcb_gpa, ++#endif + NULL + } + }; +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 6051b9f6c..65154abbe 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -215,6 +215,8 @@ static struct ConfidentialGuestMemoryEncryptionOps sev_memory_encryption_ops = { + .load_incoming_cpu_state = csv_load_incoming_cpu_state, + }; + ++bool kvm_has_msr_ghcb; ++ + static int + sev_ioctl(int fd, int cmd, void 
*data, int *error) + { +@@ -1214,6 +1216,14 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + cgs_class->memory_encryption_ops = &sev_memory_encryption_ops; + QTAILQ_INIT(&sev->shared_regions_list); + ++ /* Determine whether support MSR_AMD64_SEV_ES_GHCB */ ++ if (sev_es_enabled()) { ++ kvm_has_msr_ghcb = ++ kvm_vm_check_extension(kvm_state, KVM_CAP_SEV_ES_GHCB); ++ } else { ++ kvm_has_msr_ghcb = false; ++ } ++ + cgs->ready = true; + + return 0; +-- +2.31.1 + diff --git a/0036-anolis-target-i386-kvm-Return-resettable-when-emulat.patch b/0036-anolis-target-i386-kvm-Return-resettable-when-emulat.patch new file mode 100644 index 0000000..55ffdc1 --- /dev/null +++ b/0036-anolis-target-i386-kvm-Return-resettable-when-emulat.patch @@ -0,0 +1,39 @@ +From 88d5d2fd16b86eafa564d7b1d3a3e2653bdd39ae Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Sun, 19 Jun 2022 16:49:45 +0800 +Subject: [PATCH 27/28] anolis: target/i386/kvm: Return resettable when emulate + HYGON CSV2 guest + +SEV-ES guest will be terminated by QEMU when receive reboot request. +In order to support reboot for CSV2 guest, report resettable in +kvm_arch_cpu_check_are_resettable(). 
+ +Signed-off-by: hanliyang +--- + target/i386/kvm/kvm.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index 4bf0aa58b..7ad29afee 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -31,6 +31,7 @@ + #include "sysemu/runstate.h" + #include "kvm_i386.h" + #include "sev.h" ++#include "csv.h" + #include "hyperv.h" + #include "hyperv-proto.h" + +@@ -5721,7 +5722,7 @@ bool kvm_has_waitpkg(void) + + bool kvm_arch_cpu_check_are_resettable(void) + { +- return !sev_es_enabled(); ++ return !(sev_es_enabled() && !is_hygon_cpu()) && !csv_enabled(); + } + + #define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 +-- +2.31.1 + diff --git a/0037-anolis-kvm-Add-support-for-CSV2-reboot.patch b/0037-anolis-kvm-Add-support-for-CSV2-reboot.patch new file mode 100644 index 0000000..7931cab --- /dev/null +++ b/0037-anolis-kvm-Add-support-for-CSV2-reboot.patch @@ -0,0 +1,170 @@ +From 1cae94dd27d3ad0f56728d46230b49ad96ebd02d Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Thu, 15 Apr 2021 08:32:24 -0400 +Subject: [PATCH 28/28] anolis: kvm: Add support for CSV2 reboot + +Linux will set vcpu.arch.guest_state_protected to true after execute +LAUNCH_UPDATE_VMSA successfully, and then KVM will prevent any changes +to VMCB State Save Area. + +In order to support CSV2 guest reboot, calls cpus_control_pre_system_reset() +to set vcpu.arch.guest_state_protected to false, and calls +cpus_control_post_system_reset() to restore VMSA of guest's vcpu with +data generated by LAUNCH_UPDATE_VMSA. + +In addition, for memory encrypted guest, additional works may be +required during system reset, such as flushing the cache. The function +cpus_control_post_system_reset() hints linux to flush caches of guest +memory. 
+ +Signed-off-by: hanliyang +--- + accel/kvm/kvm-accel-ops.c | 3 +++ + accel/kvm/kvm-all.c | 10 ++++++++++ + accel/kvm/kvm-cpus.h | 3 +++ + include/sysemu/accel-ops.h | 3 +++ + include/sysemu/cpus.h | 2 ++ + linux-headers/linux/kvm.h | 4 ++++ + softmmu/cpus.c | 14 ++++++++++++++ + softmmu/runstate.c | 4 ++++ + 8 files changed, 43 insertions(+) + +diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c +index fbf4fe349..a8efba9d7 100644 +--- a/accel/kvm/kvm-accel-ops.c ++++ b/accel/kvm/kvm-accel-ops.c +@@ -104,6 +104,9 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data) + ops->remove_breakpoint = kvm_remove_breakpoint; + ops->remove_all_breakpoints = kvm_remove_all_breakpoints; + #endif ++ ++ ops->control_pre_system_reset = kvm_cpus_control_pre_system_reset; ++ ops->control_post_system_reset = kvm_cpus_control_post_system_reset; + } + + static const TypeInfo kvm_accel_ops_type = { +diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c +index 0a127ece1..46f67b38f 100644 +--- a/accel/kvm/kvm-all.c ++++ b/accel/kvm/kvm-all.c +@@ -2755,6 +2755,16 @@ void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu) + run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); + } + ++void kvm_cpus_control_pre_system_reset(void) ++{ ++ kvm_vm_ioctl(kvm_state, KVM_CONTROL_VCPU_PRE_SYSTEM_RESET, NULL); ++} ++ ++void kvm_cpus_control_post_system_reset(void) ++{ ++ kvm_vm_ioctl(kvm_state, KVM_CONTROL_VCPU_POST_SYSTEM_RESET, NULL); ++} ++ + #ifdef KVM_HAVE_MCE_INJECTION + static __thread void *pending_sigbus_addr; + static __thread int pending_sigbus_code; +diff --git a/accel/kvm/kvm-cpus.h b/accel/kvm/kvm-cpus.h +index fd63fe6a5..cd014b115 100644 +--- a/accel/kvm/kvm-cpus.h ++++ b/accel/kvm/kvm-cpus.h +@@ -23,4 +23,7 @@ int kvm_insert_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len); + int kvm_remove_breakpoint(CPUState *cpu, int type, hwaddr addr, hwaddr len); + void kvm_remove_all_breakpoints(CPUState *cpu); + ++void 
kvm_cpus_control_pre_system_reset(void); ++void kvm_cpus_control_post_system_reset(void); ++ + #endif /* KVM_CPUS_H */ +diff --git a/include/sysemu/accel-ops.h b/include/sysemu/accel-ops.h +index 8cc7996de..a840c2f39 100644 +--- a/include/sysemu/accel-ops.h ++++ b/include/sysemu/accel-ops.h +@@ -51,6 +51,9 @@ struct AccelOpsClass { + int (*insert_breakpoint)(CPUState *cpu, int type, hwaddr addr, hwaddr len); + int (*remove_breakpoint)(CPUState *cpu, int type, hwaddr addr, hwaddr len); + void (*remove_all_breakpoints)(CPUState *cpu); ++ ++ void (*control_pre_system_reset)(void); ++ void (*control_post_system_reset)(void); + }; + + #endif /* ACCEL_OPS_H */ +diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h +index 1bace3379..41eabffb2 100644 +--- a/include/sysemu/cpus.h ++++ b/include/sysemu/cpus.h +@@ -45,6 +45,8 @@ extern int icount_align_option; + void qemu_cpu_kick_self(void); + + bool cpus_are_resettable(void); ++void cpus_control_pre_system_reset(void); ++void cpus_control_post_system_reset(void); + + void cpu_synchronize_all_states(void); + void cpu_synchronize_all_post_reset(void); +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index 0ae5cf192..c6692b2f6 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -1552,6 +1552,10 @@ struct kvm_s390_ucas_mapping { + #define KVM_GET_DEVICE_ATTR _IOW(KVMIO, 0xe2, struct kvm_device_attr) + #define KVM_HAS_DEVICE_ATTR _IOW(KVMIO, 0xe3, struct kvm_device_attr) + ++/* ioctls for control vcpu setup during system reset */ ++#define KVM_CONTROL_VCPU_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) ++#define KVM_CONTROL_VCPU_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) ++ + /* + * ioctls for vcpu fds + */ +diff --git a/softmmu/cpus.c b/softmmu/cpus.c +index 5a584a8d5..b8b78c745 100644 +--- a/softmmu/cpus.c ++++ b/softmmu/cpus.c +@@ -200,6 +200,20 @@ bool cpus_are_resettable(void) + return true; + } + ++void cpus_control_pre_system_reset(void) ++{ ++ if (cpus_accel->control_pre_system_reset) { ++ 
cpus_accel->control_pre_system_reset(); ++ } ++} ++ ++void cpus_control_post_system_reset(void) ++{ ++ if (cpus_accel->control_post_system_reset) { ++ cpus_accel->control_post_system_reset(); ++ } ++} ++ + int64_t cpus_get_virtual_clock(void) + { + /* +diff --git a/softmmu/runstate.c b/softmmu/runstate.c +index 3dd83d5e5..3fa5ed1be 100644 +--- a/softmmu/runstate.c ++++ b/softmmu/runstate.c +@@ -438,6 +438,8 @@ void qemu_system_reset(ShutdownCause reason) + + mc = current_machine ? MACHINE_GET_CLASS(current_machine) : NULL; + ++ cpus_control_pre_system_reset(); ++ + cpu_synchronize_all_states(); + + if (mc && mc->reset) { +@@ -454,6 +456,8 @@ void qemu_system_reset(ShutdownCause reason) + qapi_event_send_reset(shutdown_caused_by_guest(reason), reason); + } + cpu_synchronize_all_post_reset(); ++ ++ cpus_control_post_system_reset(); + } + + /* +-- +2.31.1 + diff --git a/qemu.spec b/qemu.spec index d1512a3..4486b0f 100644 --- a/qemu.spec +++ b/qemu.spec @@ -1,4 +1,4 @@ -%define anolis_release 3 +%define anolis_release 4 %bcond_with check @@ -272,6 +272,34 @@ Patch0006: 0006-anolis-cpu-i386-populate-CPUID-0x8000_001F-when-CSV-.patch Patch0007: 0007-anolis-csv-i386-CSV-guest-do-not-need-register-unreg.patch Patch0008: 0008-anolis-target-i386-csv-load-initial-image-to-private.patch Patch0009: 0009-anolis-vga-force-full-update-for-CSV-guest.patch +Patch0010: 0010-doc-update-AMD-SEV-to-include-Live-migration-flow.patch +Patch0011: 0011-migration.json-add-AMD-SEV-specific-migration-parame.patch +Patch0012: 0012-confidential-guest-support-introduce-ConfidentialGue.patch +Patch0013: 0013-target-i386-sev-provide-callback-to-setup-outgoing-c.patch +Patch0014: 0014-target-i386-sev-do-not-create-launch-context-for-an-.patch +Patch0015: 0015-target-i386-sev-add-support-to-encrypt-the-outgoing-.patch +Patch0016: 0016-target-i386-sev-add-support-to-load-incoming-encrypt.patch +Patch0017: 0017-kvm-Add-support-for-SEV-shared-regions-list-and-KVM_.patch +Patch0018: 
0018-migration-add-support-to-migrate-shared-regions-list.patch +Patch0019: 0019-migration-ram-add-support-to-send-encrypted-pages.patch +Patch0020: 0020-migration-ram-Force-encrypted-status-for-flash0-flas.patch +Patch0021: 0021-migration-for-SEV-live-migration-bump-downtime-limit.patch +Patch0022: 0022-kvm-Add-support-for-userspace-MSR-filtering-and-hand.patch +Patch0023: 0023-anolis-migration-ram-Force-encrypted-status-for-VGA-.patch +Patch0024: 0024-anolis-target-i386-sev-Clear-shared_regions_list-whe.patch +Patch0025: 0025-anolis-migration-ram-Fix-calculation-of-gfn-correpon.patch +Patch0026: 0026-anolis-target-i386-csv-Move-is_hygon_cpu-to-header-f.patch +Patch0027: 0027-anolis-target-i386-csv-Read-cert-chain-from-file-whe.patch +Patch0028: 0028-anolis-target-i386-csv-add-support-to-queue-the-outg.patch +Patch0029: 0029-anolis-target-i386-csv-add-support-to-encrypt-the-ou.patch +Patch0030: 0030-anolis-target-i386-csv-add-support-to-queue-the-inco.patch +Patch0031: 0031-anolis-target-i386-csv-add-support-to-load-incoming-.patch +Patch0032: 0032-anolis-migration-ram-Accelerate-the-transmission-of-.patch +Patch0033: 0033-anolis-migration-ram-Accelerate-the-loading-of-CSV-g.patch +Patch0034: 0034-anolis-target-i386-csv-Add-support-for-migrate-VMSA-.patch +Patch0035: 0035-anolis-target-i386-get-set-migrate-GHCB-state.patch +Patch0036: 0036-anolis-target-i386-kvm-Return-resettable-when-emulat.patch +Patch0037: 0037-anolis-kvm-Add-support-for-CSV2-reboot.patch ExclusiveArch: x86_64 aarch64 @@ -1840,6 +1868,37 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \ %endif %changelog +* Tue Nov 22 2023 Liyang Han - 15:7.2.6-4 +- Patch0010: 0010-doc-update-AMD-SEV-to-include-Live-migration-flow.patch +- Patch0011: 0011-migration.json-add-AMD-SEV-specific-migration-parame.patch +- Patch0012: 0012-confidential-guest-support-introduce-ConfidentialGue.patch +- Patch0013: 0013-target-i386-sev-provide-callback-to-setup-outgoing-c.patch +- Patch0014: 
0014-target-i386-sev-do-not-create-launch-context-for-an-.patch +- Patch0015: 0015-target-i386-sev-add-support-to-encrypt-the-outgoing-.patch +- Patch0016: 0016-target-i386-sev-add-support-to-load-incoming-encrypt.patch +- Patch0017: 0017-kvm-Add-support-for-SEV-shared-regions-list-and-KVM_.patch +- Patch0018: 0018-migration-add-support-to-migrate-shared-regions-list.patch +- Patch0019: 0019-migration-ram-add-support-to-send-encrypted-pages.patch +- Patch0020: 0020-migration-ram-Force-encrypted-status-for-flash0-flas.patch +- Patch0021: 0021-migration-for-SEV-live-migration-bump-downtime-limit.patch +- Patch0022: 0022-kvm-Add-support-for-userspace-MSR-filtering-and-hand.patch +- Patch0023: 0023-anolis-migration-ram-Force-encrypted-status-for-VGA-.patch +- Patch0024: 0024-anolis-target-i386-sev-Clear-shared_regions_list-whe.patch +- Patch0025: 0025-anolis-migration-ram-Fix-calculation-of-gfn-correpon.patch +- Patch0026: 0026-anolis-target-i386-csv-Move-is_hygon_cpu-to-header-f.patch +- Patch0027: 0027-anolis-target-i386-csv-Read-cert-chain-from-file-whe.patch +- Patch0028: 0028-anolis-target-i386-csv-add-support-to-queue-the-outg.patch +- Patch0029: 0029-anolis-target-i386-csv-add-support-to-encrypt-the-ou.patch +- Patch0030: 0030-anolis-target-i386-csv-add-support-to-queue-the-inco.patch +- Patch0031: 0031-anolis-target-i386-csv-add-support-to-load-incoming-.patch +- Patch0032: 0032-anolis-migration-ram-Accelerate-the-transmission-of-.patch +- Patch0033: 0033-anolis-migration-ram-Accelerate-the-loading-of-CSV-g.patch +- Patch0034: 0034-anolis-target-i386-csv-Add-support-for-migrate-VMSA-.patch +- Patch0035: 0035-anolis-target-i386-get-set-migrate-GHCB-state.patch +- Patch0036: 0036-anolis-target-i386-kvm-Return-resettable-when-emulat.patch +- Patch0037: 0037-anolis-kvm-Add-support-for-CSV2-reboot.patch + (Support Hygon CSV/CSV2 live migration, CSV2 reboot) + * Thu Nov 16 2023 Wenlong Zhang - 15:7.2.6-3 - disable build for loongarch -- Gitee