From 9a64df055e2e9f95722e176757e3d9f9377a34eb Mon Sep 17 00:00:00 2001 From: liupingwei Date: Fri, 31 May 2024 15:39:09 +0800 Subject: [PATCH] Add virtcca cvm feature Signed-off-by: liupingwei --- Add-virtcca-cvm-feature.patch | 986 ++++++++++++++++++++++++++++++++++ qemu.spec | 6 +- 2 files changed, 991 insertions(+), 1 deletion(-) create mode 100644 Add-virtcca-cvm-feature.patch diff --git a/Add-virtcca-cvm-feature.patch b/Add-virtcca-cvm-feature.patch new file mode 100644 index 0000000..899f943 --- /dev/null +++ b/Add-virtcca-cvm-feature.patch @@ -0,0 +1,986 @@ +From e9fbced955bfe23c9edfd233de021791fd0c0383 Mon Sep 17 00:00:00 2001 +From: liupingwei +Date: Fri, 31 May 2024 15:07:20 +0800 +Subject: [PATCH] Add virtcca cvm feature + +Signed-off-by: liupingwei +--- + accel/kvm/kvm-all.c | 36 ++++ + hw/arm/boot.c | 49 +++++ + hw/arm/virt.c | 51 ++++- + include/hw/arm/boot.h | 1 + + include/hw/arm/virt.h | 1 + + include/sysemu/kvm.h | 8 + + linux-headers/asm-arm64/kvm.h | 62 ++++++ + linux-headers/linux/kvm.h | 31 ++- + qapi/qom.json | 30 ++- + target/arm/kvm-tmm.c | 344 ++++++++++++++++++++++++++++++++++ + target/arm/kvm.c | 6 +- + target/arm/kvm64.c | 5 + + target/arm/kvm_arm.h | 10 + + target/arm/meson.build | 1 + + 14 files changed, 628 insertions(+), 7 deletions(-) + create mode 100644 target/arm/kvm-tmm.c + +diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c +index e55b2b6..381e3c8 100644 +--- a/accel/kvm/kvm-all.c ++++ b/accel/kvm/kvm-all.c +@@ -49,6 +49,8 @@ + + #include "hw/boards.h" + ++#include "sysemu/kvm.h" ++ + /* This check must be after config-host.h is included */ + #ifdef CONFIG_EVENTFD + #include +@@ -79,6 +81,9 @@ struct KVMParkedVcpu { + }; + + KVMState *kvm_state; ++ ++bool virtcca_cvm_allowed = false; ++ + bool kvm_kernel_irqchip; + bool kvm_split_irqchip; + bool kvm_async_interrupts_allowed; +@@ -2272,6 +2277,11 @@ uint32_t kvm_dirty_ring_size(void) + return kvm_state->kvm_dirty_ring_size; + } + ++static inline bool kvm_is_virtcca_cvm_type(int type) ++{ ++ return type & VIRTCCA_CVM_TYPE; ++} ++ + static int kvm_init(MachineState *ms) + { + MachineClass *mc = MACHINE_GET_CLASS(ms); +@@ -2356,6 +2366,10 @@ static int kvm_init(MachineState *ms) + type = mc->kvm_type(ms, NULL); + } + ++ if (kvm_is_virtcca_cvm_type(type)) { ++ virtcca_cvm_allowed = true; ++ } ++ + do { + ret = kvm_ioctl(s, KVM_CREATE_VM, type); + } while (ret == -EINTR); +@@ -3455,6 +3469,28 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) + return r; + } + ++int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, ++ struct kvm_numa_info *numa_info) ++{ ++ KVMState *state = kvm_state; ++ struct kvm_user_data data; ++ int ret; ++ ++ data.loader_start = loader_start; ++ data.image_end = image_end; ++ data.initrd_start = initrd_start; ++ data.dtb_end = dtb_end; ++ data.ram_size = ram_size; ++ memcpy(&data.numa_info, numa_info, sizeof(struct kvm_numa_info)); ++ ++ ret = kvm_vm_ioctl(state, KVM_LOAD_USER_DATA, &data); ++ if (ret < 0) { ++ error_report("%s: KVM_LOAD_USER_DATA failed!\n", __func__); ++ } ++ ++ return ret; ++} ++ + static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, + hwaddr start_addr, hwaddr size) + { +diff --git a/hw/arm/boot.c b/hw/arm/boot.c +index 3d45de1..db69ec6 100644 +--- a/hw/arm/boot.c ++++ b/hw/arm/boot.c +@@ -27,6 +27,7 @@ + #include "qemu/config-file.h" + #include "qemu/option.h" + #include "qemu/units.h" ++#include "kvm_arm.h" + + /* Kernel boot protocol is specified in the kernel docs + 
* Documentation/arm/Booting and Documentation/arm64/booting.txt +@@ -1255,6 +1256,16 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, + for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { + ARM_CPU(cs)->env.boot_info = info; + } ++ ++ if (kvm_enabled() && virtcca_cvm_enabled()) { ++ if (info->dtb_limit == 0) { ++ info->dtb_limit = info->dtb_start + 0x200000; ++ } ++ kvm_load_user_data(info->loader_start, image_high_addr, info->initrd_start, ++ info->dtb_limit, info->ram_size, (struct kvm_numa_info *)info->numa_info); ++ tmm_add_ram_region(info->loader_start, image_high_addr - info->loader_start, ++ info->initrd_start, info->dtb_limit - info->initrd_start, true); ++ } + } + + static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info) +@@ -1344,6 +1355,39 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) + info->initrd_filename = ms->initrd_filename; + info->dtb_filename = ms->dtb; + info->dtb_limit = 0; ++ if (kvm_enabled() && virtcca_cvm_enabled()) { ++ info->ram_size = ms->ram_size; ++ info->numa_info = g_malloc(sizeof(struct kvm_numa_info)); ++ struct kvm_numa_info *numa_info = (struct kvm_numa_info *) info->numa_info; ++ if (ms->numa_state != NULL && ms->numa_state->num_nodes > 0) { ++ numa_info->numa_cnt = ms->numa_state->num_nodes; ++ uint64_t mem_base = info->loader_start; ++ for (int64_t i = 0; i < ms->numa_state->num_nodes && i < MAX_NUMA_NODE; i++) { ++ uint64_t mem_len = ms->numa_state->nodes[i].node_mem; ++ numa_info->numa_nodes[i].numa_id = i; ++ numa_info->numa_nodes[i].ipa_start = mem_base; ++ numa_info->numa_nodes[i].ipa_size = mem_len; ++ memcpy(numa_info->numa_nodes[i].host_numa_nodes, ms->numa_state->nodes[i].node_memdev->host_nodes, ++ MAX_NODES / BITS_PER_LONG * sizeof(uint64_t)); ++ mem_base += mem_len; ++ } ++ } else { ++ numa_info->numa_cnt = 1; ++ numa_info->numa_nodes[0].numa_id = 0; ++ numa_info->numa_nodes[0].ipa_start = info->loader_start; ++ numa_info->numa_nodes[0].ipa_size = info->ram_size; ++ memset(numa_info->numa_nodes[0].host_numa_nodes, 0, MAX_NODES / BITS_PER_LONG * sizeof(uint64_t)); ++ } ++ ++ for (int cpu = ms->smp.cpus - 1; cpu >= 0; cpu--) { ++ ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); ++ CPUState *cs = CPU(armcpu); ++ uint64_t node_id = 0; ++ if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) ++ node_id = ms->possible_cpus->cpus[cs->cpu_index].props.node_id; ++ bitmap_set((unsigned long *)numa_info->numa_nodes[node_id].cpu_id, cpu, 1); ++ } ++ } + + /* Load the kernel. 
*/ + if (!info->kernel_filename || info->firmware_loaded) { +@@ -1352,6 +1396,11 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) + arm_setup_direct_kernel_boot(cpu, info); + } + ++ if (kvm_enabled() && virtcca_cvm_enabled()) { ++ g_free(info->numa_info); ++ info->numa_info = NULL; ++ } ++ + if (!info->skip_dtb_autoload && have_dtb(info)) { + if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) { + exit(1); +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index 93554cc..26dc568 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -1801,6 +1801,19 @@ static void virt_set_memmap(VirtMachineState *vms) + vms->memmap[i] = base_memmap[i]; + } + ++ /* fix VIRT_MEM range */ ++ if (object_property_find(OBJECT(current_machine), "kvm-type")) { ++ g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), ++ "kvm-type", &error_abort); ++ ++ if (!strcmp(kvm_type, "cvm")) { ++ vms->memmap[VIRT_MEM].base = 3 * GiB; ++ vms->memmap[VIRT_MEM].size = ms->ram_size; ++ info_report("[qemu] fix VIRT_MEM range 0x%llx - 0x%llx\n", (unsigned long long)(vms->memmap[VIRT_MEM].base), ++ (unsigned long long)(vms->memmap[VIRT_MEM].base + ms->ram_size)); ++ } ++ } ++ + if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) { + error_report("unsupported number of memory slots: %"PRIu64, + ms->ram_slots); +@@ -2072,7 +2085,7 @@ static void machvirt_init(MachineState *machine) + */ + if (vms->secure && firmware_loaded) { + vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; +- } else if (vms->virt) { ++ } else if (vms->virt || virtcca_cvm_enabled()) { + vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC; + } else { + vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC; +@@ -2118,6 +2131,13 @@ static void machvirt_init(MachineState *machine) + exit(1); + } + ++ if (virtcca_cvm_enabled()) { ++ int ret = kvm_arm_tmm_init(machine->cgs, &error_fatal); ++ if (ret != 0) { ++ error_report("fail to initialize TMM"); ++ exit(1); ++ } ++ } + create_fdt(vms); + qemu_log("cpu init start\n"); + +@@ -2991,6 +3011,15 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, + static int virt_kvm_type(MachineState *ms, const char *type_str) + { + VirtMachineState *vms = VIRT_MACHINE(ms); ++ int virtcca_cvm_type = 0; ++ if (object_property_find(OBJECT(current_machine), "kvm-type")) { ++ g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), ++ "kvm-type", &error_abort); ++ ++ if (!strcmp(kvm_type, "cvm")) { ++ virtcca_cvm_type = VIRTCCA_CVM_TYPE; ++ } ++ } + int max_vm_pa_size, requested_pa_size; + bool fixed_ipa; + +@@ -3020,7 +3049,9 @@ static int virt_kvm_type(MachineState *ms, const char *type_str) + * the implicit legacy 40b IPA setting, in which case the kvm_type + * must be 0. + */ +- return fixed_ipa ? 0 : requested_pa_size; ++ return strcmp(type_str, "cvm") == 0 ? ++ ((fixed_ipa ? 0 : requested_pa_size) | virtcca_cvm_type) : ++ (fixed_ipa ? 
0 : requested_pa_size); + } + + static void virt_machine_class_init(ObjectClass *oc, void *data) +@@ -3143,6 +3174,19 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) + + } + ++static char *virt_get_kvm_type(Object *obj, Error **errp G_GNUC_UNUSED) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ return g_strdup(vms->kvm_type); ++} ++ ++static void virt_set_kvm_type(Object *obj, const char *value, Error **errp G_GNUC_UNUSED) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ g_free(vms->kvm_type); ++ vms->kvm_type = g_strdup(value); ++} ++ + static void virt_instance_init(Object *obj) + { + VirtMachineState *vms = VIRT_MACHINE(obj); +@@ -3194,6 +3238,9 @@ static void virt_instance_init(Object *obj) + + vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); ++ ++ object_property_add_str(obj, "kvm-type", virt_get_kvm_type, virt_set_kvm_type); ++ object_property_set_description(obj, "kvm-type", "CVM or Normal VM"); + } + + static const TypeInfo virt_machine_info = { +diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h +index c3c4d3e..36aa5dd 100644 +--- a/include/hw/arm/boot.h ++++ b/include/hw/arm/boot.h +@@ -36,6 +36,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size); + /* arm_boot.c */ + struct arm_boot_info { + uint64_t ram_size; ++ void *numa_info; + const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; +diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h +index 4ddee19..3fc1adc 100644 +--- a/include/hw/arm/virt.h ++++ b/include/hw/arm/virt.h +@@ -176,6 +176,7 @@ struct VirtMachineState { + PCIBus *bus; + char *oem_id; + char *oem_table_id; ++ char *kvm_type; + }; + + #define VIRT_ECAM_ID(high) (high ? 
VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM) +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index 9f52d08..0b5319b 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -19,6 +19,7 @@ + #include "exec/memattrs.h" + #include "qemu/accel.h" + #include "qom/object.h" ++#include "linux-headers/linux/kvm.h" + + #ifdef NEED_CPU_H + # ifdef CONFIG_KVM +@@ -32,6 +33,7 @@ + #ifdef CONFIG_KVM_IS_POSSIBLE + + extern bool kvm_allowed; ++extern bool virtcca_cvm_allowed; + extern bool kvm_kernel_irqchip; + extern bool kvm_split_irqchip; + extern bool kvm_async_interrupts_allowed; +@@ -48,6 +50,8 @@ extern bool kvm_ioeventfd_any_length_allowed; + extern bool kvm_msi_use_devid; + + #define kvm_enabled() (kvm_allowed) ++#define virtcca_cvm_enabled() (virtcca_cvm_allowed) ++#define VIRTCCA_CVM_TYPE (1UL << 8) + /** + * kvm_irqchip_in_kernel: + * +@@ -170,6 +174,7 @@ extern bool kvm_msi_use_devid; + #else + + #define kvm_enabled() (0) ++#define virtcca_cvm_enabled() (0) + #define kvm_irqchip_in_kernel() (false) + #define kvm_irqchip_is_split() (false) + #define kvm_async_interrupts_enabled() (false) +@@ -554,6 +559,9 @@ bool kvm_dirty_ring_enabled(void); + + uint32_t kvm_dirty_ring_size(void); + ++int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, ++ struct kvm_numa_info *numa_info); ++ + #ifdef __aarch64__ + int kvm_create_shadow_device(PCIDevice *dev); + int kvm_delete_shadow_device(PCIDevice *dev); +diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h +index 3d2ce99..34030a8 100644 +--- a/linux-headers/asm-arm64/kvm.h ++++ b/linux-headers/asm-arm64/kvm.h +@@ -106,6 +106,7 @@ struct kvm_regs { + #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ + #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ + #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ ++#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */ + + struct kvm_vcpu_init { + __u32 target; +@@ -411,6 +412,67 @@ struct kvm_arm_copy_mte_tags { + #define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS + #define KVM_PSCI_RET_DENIED PSCI_RET_DENIED + ++/* KVM_CAP_ARM_TMM on VM fd */ ++#define KVM_CAP_ARM_TMM_CONFIG_CVM 0 ++#define KVM_CAP_ARM_TMM_CREATE_RD 1 ++#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 ++#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3 ++ ++#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 ++#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 ++ ++#define KVM_CAP_ARM_TMM_RPV_SIZE 64 ++ ++/* List of configuration items accepted for KVM_CAP_ARM_RME_CONFIG_REALM */ ++#define KVM_CAP_ARM_TMM_CFG_RPV 0 ++#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1 ++#define KVM_CAP_ARM_TMM_CFG_SVE 2 ++#define KVM_CAP_ARM_TMM_CFG_DBG 3 ++#define KVM_CAP_ARM_TMM_CFG_PMU 4 ++ ++struct kvm_cap_arm_tmm_config_item { ++ __u32 cfg; ++ union { ++ /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */ ++ struct { ++ __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE]; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */ ++ struct { ++ __u32 hash_algo; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */ ++ struct { ++ __u32 sve_vq; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */ ++ struct { ++ __u32 num_brps; ++ __u32 num_wrps; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */ ++ struct { ++ __u32 num_pmu_cntrs; ++ }; ++ /* Fix the size of the union */ ++ __u8 reserved[256]; ++ }; ++}; ++ ++#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) ++struct kvm_cap_arm_tmm_populate_region_args { ++ __u64 populate_ipa_base1; ++ __u64 populate_ipa_size1; ++ __u64 
populate_ipa_base2; ++ __u64 populate_ipa_size2; ++ __u32 flags; ++ __u32 reserved[3]; ++}; ++ + #endif + + #endif /* __ARM_KVM_H__ */ +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index cd0885f..fa22d85 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -14,6 +14,8 @@ + #include + #include + ++#include "sysemu/numa.h" ++ + #define KVM_API_VERSION 12 + + /* *** Deprecated interfaces *** */ +@@ -1126,6 +1128,7 @@ struct kvm_ppc_resize_hpt { + #define KVM_CAP_X86_NOTIFY_VMEXIT 219 + + #define KVM_CAP_ARM_CPU_FEATURE 555 ++#define KVM_CAP_ARM_TMM 300 + + #define KVM_CAP_ARM_VIRT_MSI_BYPASS 799 + +@@ -1370,6 +1373,32 @@ struct kvm_vfio_spapr_tce { + __s32 tablefd; + }; + ++#define MAX_NUMA_NODE 8 ++#define MAX_CPU_BIT_MAP 4 ++#define MAX_NODE_BIT_MAP (MAX_NODES / BITS_PER_LONG) ++ ++struct kvm_numa_node { ++ __u64 numa_id; ++ __u64 ipa_start; ++ __u64 ipa_size; ++ __u64 host_numa_nodes[MAX_NODE_BIT_MAP]; ++ __u64 cpu_id[MAX_CPU_BIT_MAP]; ++}; ++ ++struct kvm_numa_info { ++ __u64 numa_cnt; ++ struct kvm_numa_node numa_nodes[MAX_NUMA_NODE]; ++}; ++ ++struct kvm_user_data { ++ __u64 loader_start; ++ __u64 image_end; ++ __u64 initrd_start; ++ __u64 dtb_end; ++ __u64 ram_size; ++ struct kvm_numa_info numa_info; ++}; ++ + /* + * ioctls for VM fds + */ +@@ -1388,7 +1417,7 @@ struct kvm_vfio_spapr_tce { + struct kvm_userspace_memory_region) + #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) + #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) +- ++#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data) + /* enable ucontrol for s390 */ + struct kvm_s390_ucas_mapping { + __u64 user_addr; +diff --git a/qapi/qom.json b/qapi/qom.json +index eeb5395..a0450f7 100644 +--- a/qapi/qom.json ++++ b/qapi/qom.json +@@ -785,6 +785,30 @@ + 'reduced-phys-bits': 'uint32', + '*kernel-hashes': 'bool' } } + ++## ++# @TmmGuestMeasurementAlgo: ++# ++# Algorithm to use for cvm measurements ++# ++# Since: FIXME ++## ++{ 'enum': 'TmmGuestMeasurementAlgo', ++'data': ['default', 'sha256', 'sha512'] } ++ ++## ++# @TmmGuestProperties: ++# ++# Properties for tmm-guest objects. ++# ++# @sve-vector-length: SVE vector length (default: 0, SVE disabled) ++# ++# Since: FIXME ++## ++{ 'struct': 'TmmGuestProperties', ++ 'data': { '*sve-vector-length': 'uint32', ++ '*num-pmu-counters': 'uint32', ++ '*measurement-algo': 'TmmGuestMeasurementAlgo' } } ++ + ## + # @ObjectType: + # +@@ -842,7 +866,8 @@ + 'tls-creds-psk', + 'tls-creds-x509', + 'tls-cipher-suites', +- { 'name': 'x-remote-object', 'features': [ 'unstable' ] } ++ { 'name': 'x-remote-object', 'features': [ 'unstable' ] }, ++ 'tmm-guest' + ] } + + ## +@@ -905,7 +930,8 @@ + 'tls-creds-psk': 'TlsCredsPskProperties', + 'tls-creds-x509': 'TlsCredsX509Properties', + 'tls-cipher-suites': 'TlsCredsProperties', +- 'x-remote-object': 'RemoteObjectProperties' ++ 'x-remote-object': 'RemoteObjectProperties', ++ 'tmm-guest': 'TmmGuestProperties' + } } + + ## +diff --git a/target/arm/kvm-tmm.c b/target/arm/kvm-tmm.c +new file mode 100644 +index 0000000..e7df485 +--- /dev/null ++++ b/target/arm/kvm-tmm.c +@@ -0,0 +1,344 @@ ++/* ++ * QEMU add virtcca cvm feature. ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. 
++ * ++ */ ++ ++#include "qemu/osdep.h" ++#include "exec/confidential-guest-support.h" ++#include "hw/boards.h" ++#include "hw/core/cpu.h" ++#include "kvm_arm.h" ++#include "migration/blocker.h" ++#include "qapi/error.h" ++#include "qom/object_interfaces.h" ++#include "sysemu/kvm.h" ++#include "sysemu/runstate.h" ++#include "hw/loader.h" ++ ++#define TYPE_TMM_GUEST "tmm-guest" ++OBJECT_DECLARE_SIMPLE_TYPE(TmmGuest, TMM_GUEST) ++ ++#define TMM_PAGE_SIZE qemu_real_host_page_size ++#define TMM_MAX_PMU_CTRS 0x20 ++#define TMM_MAX_CFG 5 ++ ++struct TmmGuest { ++ ConfidentialGuestSupport parent_obj; ++ GSList *ram_regions; ++ TmmGuestMeasurementAlgo measurement_algo; ++ uint32_t sve_vl; ++ uint32_t num_pmu_cntrs; ++}; ++ ++typedef struct { ++ hwaddr base1; ++ hwaddr len1; ++ hwaddr base2; ++ hwaddr len2; ++ bool populate; ++} TmmRamRegion; ++ ++static TmmGuest *tmm_guest; ++ ++bool kvm_arm_tmm_enabled(void) ++{ ++ return !!tmm_guest; ++} ++ ++static int tmm_configure_one(TmmGuest *guest, uint32_t cfg, Error **errp) ++{ ++ int ret = 1; ++ const char *cfg_str; ++ struct kvm_cap_arm_tmm_config_item args = { ++ .cfg = cfg, ++ }; ++ ++ switch (cfg) { ++ case KVM_CAP_ARM_TMM_CFG_RPV: ++ return 0; ++ case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: ++ switch (guest->measurement_algo) { ++ case TMM_GUEST_MEASUREMENT_ALGO_DEFAULT: ++ return 0; ++ case TMM_GUEST_MEASUREMENT_ALGO_SHA256: ++ args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256; ++ break; ++ case TMM_GUEST_MEASUREMENT_ALGO_SHA512: ++ args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ cfg_str = "hash algorithm"; ++ break; ++ case KVM_CAP_ARM_TMM_CFG_SVE: ++ if (!guest->sve_vl) { ++ return 0; ++ } ++ args.sve_vq = guest->sve_vl / 128; ++ cfg_str = "SVE"; ++ break; ++ case KVM_CAP_ARM_TMM_CFG_DBG: ++ return 0; ++ case KVM_CAP_ARM_TMM_CFG_PMU: ++ if (!guest->num_pmu_cntrs) { ++ return 0; ++ } ++ args.num_pmu_cntrs = guest->num_pmu_cntrs; ++ cfg_str = "PMU"; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_CONFIG_CVM, (intptr_t)&args); ++ if (ret) { ++ error_setg_errno(errp, -ret, "TMM: failed to configure %s", cfg_str); ++ } ++ ++ return ret; ++} ++ ++static gint tmm_compare_ram_regions(gconstpointer a, gconstpointer b) ++{ ++ const TmmRamRegion *ra = a; ++ const TmmRamRegion *rb = b; ++ ++ g_assert(ra->base1 != rb->base1); ++ return ra->base1 < rb->base1 ? 
-1 : 1; ++} ++ ++void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate) ++{ ++ TmmRamRegion *region; ++ ++ region = g_new0(TmmRamRegion, 1); ++ region->base1 = QEMU_ALIGN_DOWN(base1, TMM_PAGE_SIZE); ++ region->len1 = QEMU_ALIGN_UP(len1, TMM_PAGE_SIZE); ++ region->base2 = QEMU_ALIGN_DOWN(base2, TMM_PAGE_SIZE); ++ region->len2 = QEMU_ALIGN_UP(len2, TMM_PAGE_SIZE); ++ region->populate = populate; ++ ++ tmm_guest->ram_regions = g_slist_insert_sorted(tmm_guest->ram_regions, ++ region, tmm_compare_ram_regions); ++} ++ ++static void tmm_populate_region(gpointer data, gpointer unused) ++{ ++ int ret; ++ const TmmRamRegion *region = data; ++ struct kvm_cap_arm_tmm_populate_region_args populate_args = { ++ .populate_ipa_base1 = region->base1, ++ .populate_ipa_size1 = region->len1, ++ .populate_ipa_base2 = region->base2, ++ .populate_ipa_size2 = region->len2, ++ .flags = KVM_ARM_TMM_POPULATE_FLAGS_MEASURE, ++ }; ++ ++ if (!region->populate) { ++ return; ++ } ++ ++ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_POPULATE_CVM, ++ (intptr_t)&populate_args); ++ if (ret) { ++ error_report("TMM: failed to populate cvm region (0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx"): %s", ++ region->base1, region->len1, region->base2, region->len2, strerror(-ret)); ++ exit(1); ++ } ++} ++ ++static int tmm_create_rd(Error **errp) ++{ ++ int ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_CREATE_RD); ++ if (ret) { ++ error_setg_errno(errp, -ret, "TMM: failed to create tmm Descriptor"); ++ } ++ return ret; ++} ++ ++static void tmm_vm_state_change(void *opaque, bool running, RunState state) ++{ ++ int ret; ++ CPUState *cs; ++ ++ if (!running) { ++ return; ++ } ++ ++ g_slist_foreach(tmm_guest->ram_regions, tmm_populate_region, NULL); ++ g_slist_free_full(g_steal_pointer(&tmm_guest->ram_regions), g_free); ++ ++ CPU_FOREACH(cs) { ++ ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_TEC); ++ if (ret) { ++ error_report("TMM: failed to finalize vCPU: %s", strerror(-ret)); ++ exit(1); ++ } ++ } ++ ++ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_ACTIVATE_CVM); ++ if (ret) { ++ error_report("TMM: failed to activate cvm: %s", strerror(-ret)); ++ exit(1); ++ } ++} ++ ++int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp) ++{ ++ int ret; ++ int cfg; ++ ++ if (!tmm_guest) { ++ return -ENODEV; ++ } ++ ++ if (!kvm_check_extension(kvm_state, KVM_CAP_ARM_TMM)) { ++ error_setg(errp, "KVM does not support TMM"); ++ return -ENODEV; ++ } ++ ++ for (cfg = 0; cfg < TMM_MAX_CFG; cfg++) { ++ ret = tmm_configure_one(tmm_guest, cfg, &error_abort); ++ if (ret) { ++ return ret; ++ } ++ } ++ ++ ret = tmm_create_rd(&error_abort); ++ if (ret) { ++ return ret; ++ } ++ ++ qemu_add_vm_change_state_handler(tmm_vm_state_change, NULL); ++ return 0; ++} ++ ++static void tmm_get_sve_vl(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ visit_type_uint32(v, name, &guest->sve_vl, errp); ++} ++ ++static void tmm_set_sve_vl(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ uint32_t value; ++ ++ if (!visit_type_uint32(v, name, &value, errp)) { ++ return; ++ } ++ ++ if (value & 0x7f || value >= ARM_MAX_VQ * 128) { ++ error_setg(errp, "invalid SVE vector length"); ++ return; ++ } ++ ++ guest->sve_vl = value; ++} ++ ++static void tmm_get_num_pmu_cntrs(Object *obj, Visitor *v, const 
char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ visit_type_uint32(v, name, &guest->num_pmu_cntrs, errp); ++} ++ ++static void tmm_set_num_pmu_cntrs(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ uint32_t value; ++ ++ if (!visit_type_uint32(v, name, &value, errp)) { ++ return; ++ } ++ ++ if (value >= TMM_MAX_PMU_CTRS) { ++ error_setg(errp, "invalid number of PMU counters"); ++ return; ++ } ++ ++ guest->num_pmu_cntrs = value; ++} ++ ++static int tmm_get_measurement_algo(Object *obj, Error **errp G_GNUC_UNUSED) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ return guest->measurement_algo; ++} ++ ++static void tmm_set_measurement_algo(Object *obj, int algo, Error **errp G_GNUC_UNUSED) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ guest->measurement_algo = algo; ++} ++ ++static void tmm_guest_class_init(ObjectClass *oc, void *data) ++{ ++ object_class_property_add_enum(oc, "measurement-algo", ++ "TmmGuestMeasurementAlgo", ++ &TmmGuestMeasurementAlgo_lookup, ++ tmm_get_measurement_algo, ++ tmm_set_measurement_algo); ++ object_class_property_set_description(oc, "measurement-algo", ++ "cvm measurement algorithm ('sha256', 'sha512')"); ++ /* ++ * This is not ideal. Normally SVE parameters are given to -cpu, but the ++ * cvm parameters are needed much earlier than CPU initialization. We also ++ * don't have a way to discover what is supported at the moment, the idea is ++ * that the user knows exactly what hardware it is running on because these ++ * parameters are part of the measurement and play in the attestation. ++ */ ++ object_class_property_add(oc, "sve-vector-length", "uint32", tmm_get_sve_vl, ++ tmm_set_sve_vl, NULL, NULL); ++ object_class_property_set_description(oc, "sve-vector-length", ++ "SVE vector length. 
0 disables SVE (the default)"); ++ object_class_property_add(oc, "num-pmu-counters", "uint32", ++ tmm_get_num_pmu_cntrs, tmm_set_num_pmu_cntrs, ++ NULL, NULL); ++ object_class_property_set_description(oc, "num-pmu-counters", ++ "Number of PMU counters"); ++} ++ ++static void tmm_guest_instance_init(Object *obj) ++{ ++ if (tmm_guest) { ++ error_report("a single instance of TmmGuest is supported"); ++ exit(1); ++ } ++ tmm_guest = TMM_GUEST(obj); ++} ++ ++static const TypeInfo tmm_guest_info = { ++ .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT, ++ .name = TYPE_TMM_GUEST, ++ .instance_size = sizeof(struct TmmGuest), ++ .instance_init = tmm_guest_instance_init, ++ .class_init = tmm_guest_class_init, ++ .interfaces = (InterfaceInfo[]) { ++ { TYPE_USER_CREATABLE }, ++ { } ++ } ++}; ++ ++static void tmm_register_types(void) ++{ ++ type_register_static(&tmm_guest_info); ++} ++type_init(tmm_register_types); +diff --git a/target/arm/kvm.c b/target/arm/kvm.c +index 38d80ad..f62d9ec 100644 +--- a/target/arm/kvm.c ++++ b/target/arm/kvm.c +@@ -605,7 +605,9 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) + if (kvm_arm_cpreg_level(regidx) > level) { + continue; + } +- ++ if (virtcca_cvm_enabled() && regidx == KVM_REG_ARM_TIMER_CNT) { ++ continue; ++ } + r.id = regidx; + switch (regidx & KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U32: +@@ -1140,7 +1142,7 @@ int kvm_arch_msi_data_to_gsi(uint32_t data) + + bool kvm_arch_cpu_check_are_resettable(void) + { +- return true; ++ return !virtcca_cvm_enabled(); + } + + void kvm_arch_accel_class_init(ObjectClass *oc) +diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c +index 0f67b8b..38d5198 100644 +--- a/target/arm/kvm64.c ++++ b/target/arm/kvm64.c +@@ -818,6 +818,11 @@ static int kvm_arm_sve_set_vls(CPUState *cs) + + assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); + ++ if (virtcca_cvm_enabled()) { ++ /* Already set through tmm config */ ++ return 0; ++ } ++ + for (vq = 1; vq <= cpu->sve_max_vq; ++vq) { + if (test_bit(vq - 1, cpu->sve_vq_map)) { + i = (vq - 1) / 64; +diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h +index 8b644b3..5a49383 100644 +--- a/target/arm/kvm_arm.h ++++ b/target/arm/kvm_arm.h +@@ -377,6 +377,11 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa); + + int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); + ++void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate); ++ ++int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp); ++bool kvm_arm_tmm_enabled(void); ++ + int kvm_arm_get_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *target); + int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *source); + +@@ -471,6 +476,11 @@ static inline int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, + g_assert_not_reached(); + } + ++static inline int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp G_GNUC_UNUSED) ++{ ++ g_assert_not_reached(); ++} ++ + #endif + + static inline const char *gic_class_name(void) +diff --git a/target/arm/meson.build b/target/arm/meson.build +index 50f1522..bb950fb 100644 +--- a/target/arm/meson.build ++++ b/target/arm/meson.build +@@ -39,6 +39,7 @@ arm_ss.add(files( + arm_ss.add(zlib) + + arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) ++arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c', 'kvm-tmm.c'), if_false: files('kvm-stub.c')) + + arm_ss.add(when: 'TARGET_AARCH64', if_true: files( + 'cpu64.c', +-- +2.31.1.windows.1 + diff --git a/qemu.spec b/qemu.spec index 
ca2f21a..df7cc83 100644
--- a/qemu.spec
+++ b/qemu.spec
@@ -3,7 +3,7 @@ Name: qemu
 Version: 6.2.0
-Release: 91
+Release: 92
 Epoch: 10
 Summary: QEMU is a generic and open source machine emulator and virtualizer
 License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
@@ -938,6 +938,7 @@ Patch0923: hw-virtio-Introduce-virtio_bh_new_guarded-helper.patch
 Patch0924: hw-display-virtio-gpu-Protect-from-DMA-re-entrancy-b.patch
 Patch0925: hw-char-virtio-serial-bus-Protect-from-DMA-re-entran.patch
 Patch0926: hw-virtio-virtio-crypto-Protect-from-DMA-re-entrancy.patch
+Patch0927: Add-virtcca-cvm-feature.patch
 BuildRequires: flex
 BuildRequires: gcc
@@ -1536,6 +1537,9 @@ getent passwd qemu >/dev/null || \
 %endif
 %changelog
+* Wed May 29 2024 - 10:6.2.0-92
+- cvm: Add virtcca cvm feature
 * Mon Apr 22 2024 - 10:6.2.0-91
 - hw/virtio/virtio-crypto: Protect from DMA re-entrancy bugs(CVE-2024-3446)
 - hw/char/virtio-serial-bus: Protect from DMA re-entrancy bugs(CVE-2024-3446)
--
Gitee
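
For a quick sanity check of the user-facing interface this patch introduces, a minimal launch sketch follows. It assumes the patch is applied on a TMM/VirtCCA-capable KVM host; the "kvm-type" machine property and the "tmm-guest" object with its "measurement-algo", "sve-vector-length" and "num-pmu-counters" properties are the ones registered in the patch, while the kernel/initrd paths, memory size, SMP count and kernel command line are illustrative placeholders only:

    qemu-system-aarch64 \
        -accel kvm \
        -machine virt,kvm-type=cvm,confidential-guest-support=tmm0 \
        -object tmm-guest,id=tmm0,measurement-algo=sha256 \
        -cpu host -smp 2 -m 2G \
        -kernel Image -initrd initrd.img \
        -append "console=ttyAMA0" \
        -nographic

With "kvm-type=cvm", virt_kvm_type() ORs VIRTCCA_CVM_TYPE (1 << 8) into the type passed to KVM_CREATE_VM, which is how kvm_init() sets virtcca_cvm_allowed; the tmm-guest object provides the ConfidentialGuestSupport instance consumed by kvm_arm_tmm_init() before the cvm is populated and activated at VM start.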