diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 17811002a8f7cdf54fb065e2b65a60472f2f6d3e..c40b6a09ffd85d4c6fe55f7e1c23506547d189cd 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -151,8 +151,20 @@ In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
 
-On arm64, the physical address size for a VM (IPA Size limit) is limited
-to 40bits by default. The limit can be configured if the host supports the
+On arm64, the machine type identifier is used to encode a type and the
+physical address size for the VM. The lower byte (bits[7-0]) encodes the
+address size and the upper bits[11-8] encode a machine type. The machine
+types that might be available are:
+
+ ====================== ============================================
+ KVM_VM_TYPE_ARM_NORMAL A standard VM
+ KVM_VM_TYPE_ARM_REALM  A "Realm" VM using the Arm Confidential
+                        Compute extensions; the VM's memory is
+                        protected from the host.
+ ====================== ============================================
+
+The physical address size for a VM (IPA Size limit) is limited to 40bits
+by default. The limit can be configured if the host supports the
 extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use
 KVM_VM_TYPE_ARM_IPA_SIZE(IPA_Bits) to set the size in the machine type
 identifier, where IPA_Bits is the maximum width of any physical
@@ -1271,6 +1283,8 @@ User space may need to inject several types of events to the guest.
 Set the pending SError exception state for this VCPU. It is not possible to
 'cancel' an SError that has been made pending.
 
+User space cannot inject SErrors into Realms.
+
 If the guest performed an access to I/O memory which could not be handled by
 userspace, for example because of missing instruction syndrome decode
 information or because there is no device mapped at the accessed IPA, then
@@ -3526,6 +3540,11 @@ Possible features:
 	  - the KVM_REG_ARM64_SVE_VLS pseudo-register is immutable, and can
 	    no longer be written using KVM_SET_ONE_REG.
 
+	- KVM_ARM_VCPU_REC: Allocate a REC (Realm Execution Context) for this
+	  VCPU. This must be specified on all VCPUs created in a Realm VM.
+	  Depends on KVM_CAP_ARM_RME.
+	  Requires KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_REC).
+
 4.83 KVM_ARM_PREFERRED_TARGET
 -----------------------------
 
@@ -5061,6 +5080,7 @@ Recognised values for feature:
 
   =====      ===========================================
   arm64      KVM_ARM_VCPU_SVE (requires KVM_CAP_ARM_SVE)
+  arm64      KVM_ARM_VCPU_REC (requires KVM_CAP_ARM_RME)
   =====      ===========================================
 
 Finalizes the configuration of the specified vcpu feature.
@@ -6320,6 +6340,30 @@ to the byte array.
 			__u64 flags;
 		} hypercall;
 
+4.144 KVM_ARM_VCPU_RMM_PSCI_COMPLETE
+------------------------------------
+
+:Capability: KVM_CAP_ARM_RME
+:Architectures: arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_arm_rmm_psci_complete (in)
+:Returns: 0 if successful, < 0 on error
+
+::
+
+  struct kvm_arm_rmm_psci_complete {
+        __u64 target_mpidr;
+        __u32 psci_status;
+        __u32 padding[3];
+  };
+
+Where PSCI functions are handled by user space, the RMM needs to be informed of
+the target of the operation using `target_mpidr`, along with the status
+(`psci_status`). The RMM v1.0 specification defines two functions that require
+this call: PSCI_CPU_ON and PSCI_AFFINITY_INFO.
+
+If the kernel is handling PSCI then this is done automatically and the VMM
+doesn't need to call this ioctl.
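As a rough illustration of the two interfaces described above (the Realm machine
type and KVM_ARM_VCPU_RMM_PSCI_COMPLETE), a minimal VMM-side sketch follows. The
file descriptor handling and the PSCI exit decoding are assumed to live elsewhere
in the VMM, and PSCI_RET_SUCCESS is only an example status::

  #include <linux/kvm.h>
  #include <linux/psci.h>
  #include <sys/ioctl.h>

  /* Create a Realm VM: bits[7-0] carry the IPA size, bits[11-8] the type */
  static int create_realm_vm(int kvm_fd, unsigned int ipa_bits)
  {
          unsigned long type = KVM_VM_TYPE_ARM_REALM |
                               KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits);

          return ioctl(kvm_fd, KVM_CREATE_VM, type);
  }

  /*
   * Complete a user-space handled PSCI_CPU_ON: the ioctl is issued on the
   * vcpu that made the PSCI call, naming the target vcpu by MPIDR.
   */
  static int complete_psci_cpu_on(int calling_vcpu_fd, __u64 target_mpidr)
  {
          struct kvm_arm_rmm_psci_complete req = {
                  .target_mpidr = target_mpidr,
                  .psci_status  = PSCI_RET_SUCCESS,
          };

          return ioctl(calling_vcpu_fd, KVM_ARM_VCPU_RMM_PSCI_COMPLETE, &req);
  }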
 It is strongly recommended that userspace use ``KVM_EXIT_IO`` (x86) or
 ``KVM_EXIT_MMIO`` (all except s390) to implement functionality that
@@ -7794,6 +7838,46 @@ This capability is aimed to mitigate the threat that malicious VMs can
 cause CPU stuck (due to event windows don't open up) and make the CPU
 unavailable to host or other VMs.
 
+7.38 KVM_CAP_ARM_RME
+--------------------
+
+:Architectures: arm64
+:Target: VM
+:Parameters: args[0] provides an action, args[1] points to a structure in
+             memory for some actions.
+:Returns: 0 on success, negative value on error
+
+Used to configure and set up the memory for a Realm. The available actions are:
+
+================================= =============================================
+ KVM_CAP_ARM_RME_CONFIG_REALM     Takes struct arm_rme_config as args[1] and
+                                  configures realm parameters prior to it being
+                                  created.
+
+                                  Options are ARM_RME_CONFIG_RPV to set the
+                                  "Realm Personalization Value" and
+                                  ARM_RME_CONFIG_HASH_ALGO to set the hash
+                                  algorithm.
+
+ KVM_CAP_ARM_RME_CREATE_REALM     Request that the RMM create the realm. The
+                                  realm's configuration parameters must be set
+                                  first.
+
+ KVM_CAP_ARM_RME_INIT_RIPAS_REALM Takes struct arm_rme_init_ripas as args[1]
+                                  and sets the RIPAS (Realm IPA State) of a
+                                  specified area of the realm's IPA space to
+                                  RIPAS_RAM.
+
+ KVM_CAP_ARM_RME_POPULATE_REALM   Takes struct arm_rme_populate_realm as
+                                  args[1] and populates a region of protected
+                                  address space by copying the data from the
+                                  shared alias.
+
+ KVM_CAP_ARM_RME_ACTIVATE_REALM   Request that the RMM activate the realm. No
+                                  further changes can be made to the realm's
+                                  configuration, and VCPUs are not permitted
+                                  to enter the realm until it has been
+                                  activated.
+================================= =============================================
+
 8. Other capabilities.
 ======================
 
@@ -8116,6 +8200,9 @@ is supported, then the other should as well and vice versa. For arm64 see
 Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL".
 For x86 see Documentation/virt/kvm/x86/msr.rst "MSR_KVM_STEAL_TIME".
 
+Note that steal time accounting is not available when a guest is running
+within an Arm CCA realm (machine type KVM_VM_TYPE_ARM_REALM).
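A hedged sketch of how a VMM might drive the KVM_CAP_ARM_RME actions listed in
section 7.38. The helper names, the guest RAM layout and the SHA-256 hash choice
are illustrative assumptions rather than requirements; the structures are the
ones declared in the arm64 uapi header added by this patch::

  #include <linux/kvm.h>
  #include <sys/ioctl.h>

  /* Issue one KVM_CAP_ARM_RME action on the VM file descriptor */
  static int rme_vm_action(int vm_fd, __u64 action, void *argp)
  {
          struct kvm_enable_cap cap = {
                  .cap     = KVM_CAP_ARM_RME,
                  .args[0] = action,
                  .args[1] = (__u64)(unsigned long)argp,
          };

          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }

  /* Configure, create, initialise and populate, then activate the realm */
  static int build_realm(int vm_fd, __u64 ram_base, __u64 ram_size)
  {
          struct arm_rme_config cfg = {
                  .cfg       = ARM_RME_CONFIG_HASH_ALGO,
                  .hash_algo = ARM_RME_CONFIG_MEASUREMENT_ALGO_SHA256,
          };
          struct arm_rme_init_ripas ripas = {
                  .base = ram_base,
                  .size = ram_size,
          };
          struct arm_rme_populate_realm populate = {
                  .base  = ram_base,
                  .size  = ram_size,
                  .flags = KVM_ARM_RME_POPULATE_FLAGS_MEASURE,
          };

          if (rme_vm_action(vm_fd, KVM_CAP_ARM_RME_CONFIG_REALM, &cfg) ||
              rme_vm_action(vm_fd, KVM_CAP_ARM_RME_CREATE_REALM, NULL) ||
              rme_vm_action(vm_fd, KVM_CAP_ARM_RME_INIT_RIPAS_REALM, &ripas) ||
              rme_vm_action(vm_fd, KVM_CAP_ARM_RME_POPULATE_REALM, &populate))
                  return -1;

          /* VCPUs are created and finalized (KVM_ARM_VCPU_REC) before this */
          return rme_vm_action(vm_fd, KVM_CAP_ARM_RME_ACTIVATE_REALM, NULL);
  }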
+ 8.25 KVM_CAP_S390_DIAG318 ------------------------- diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 9723c76e56a6655ddc15e0903d07bb791249ad47..1769041ae28fd8ba9330ccbf14dfae241e401dd8 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -784,8 +784,7 @@ CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_KVM_HISI_VIRT=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=y -CONFIG_HISI_VIRTCCA_HOST=y -CONFIG_HISI_VIRTCCA_CODA=y +# CONFIG_HISI_VIRTCCA_HOST is not set # CONFIG_NVHE_EL2_DEBUG is not set CONFIG_KVM_ARM_MULTI_LPI_TRANSLATE_CACHE=y CONFIG_ARCH_VCPU_STAT=y diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index f0b10cb2c87dd1a9cd67cc3e641d6a6324a8e19c..d8d80ffad0695e6c492c3a219d0ec8b0ea8a2b02 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -643,21 +643,43 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu) kvm_write_cptr_el2(val); } -#ifdef CONFIG_HISI_VIRTCCA_HOST -static inline bool kvm_is_virtcca_cvm(struct kvm *kvm) +static inline bool kvm_is_realm(struct kvm *kvm) { - if (static_branch_unlikely(&virtcca_cvm_is_available)) - return kvm->arch.is_virtcca_cvm; + if (static_branch_unlikely(&kvm_rme_is_available) && kvm) + return kvm->arch.is_realm; return false; } -static inline enum virtcca_cvm_state virtcca_cvm_state(struct kvm *kvm) +static inline enum realm_state kvm_realm_state(struct kvm *kvm) { - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; + return READ_ONCE(kvm->arch.realm.state); +} - if (!virtcca_cvm) - return 0; - return READ_ONCE(virtcca_cvm->state); +static inline bool kvm_realm_is_created(struct kvm *kvm) +{ + return kvm_is_realm(kvm) && kvm_realm_state(kvm) != REALM_STATE_NONE; } -#endif + +static inline gpa_t kvm_gpa_from_fault(struct kvm *kvm, phys_addr_t ipa) +{ + if (kvm_is_realm(kvm)) { + struct realm *realm = &kvm->arch.realm; + + return ipa & ~BIT(realm->ia_bits - 1); + } + return ipa; +} + +static inline bool vcpu_is_rec(struct kvm_vcpu *vcpu) +{ + if (static_branch_unlikely(&kvm_rme_is_available)) + return vcpu_has_feature(vcpu, KVM_ARM_VCPU_REC); + return false; +} + +static inline bool kvm_arm_rec_finalized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.rec.mpidr != INVALID_HWID; +} + #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 81898bb87c5e999375bfd9e23c80f48b976509aa..a2e6f246fd85bd71ecb768f04284c6c0baa25a46 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -27,6 +27,7 @@ #include #include #include +#include #ifdef CONFIG_HISI_VIRTCCA_HOST #include #endif @@ -41,7 +42,7 @@ #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS -#define KVM_VCPU_MAX_FEATURES 7 +#define KVM_VCPU_MAX_FEATURES 10 #define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1) #define KVM_REQ_SLEEP \ @@ -77,8 +78,8 @@ enum kvm_mode kvm_get_mode(void); static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; }; #endif -extern unsigned int __ro_after_init kvm_sve_max_vl; int __init kvm_arm_init_sve(void); +unsigned int kvm_sve_get_max_vl(struct kvm *kvm); u32 __attribute_const__ kvm_target_cpu(void); int kvm_reset_vcpu(struct kvm_vcpu *vcpu); @@ -285,6 +286,8 @@ struct kvm_arch { */ struct kvm_protected_vm pkvm; + bool is_realm; + struct realm realm; #ifdef CONFIG_KVM_HISI_VIRT spinlock_t sched_lock; cpumask_var_t sched_cpus; /* Union of all vcpu's cpus_ptr */ @@ 
-632,6 +635,9 @@ struct kvm_vcpu_arch { /* Per-vcpu CCSIDR override or NULL */ u32 *ccsidr; + /* Realm meta data */ + struct realm_rec rec; + #ifdef CONFIG_KVM_HISI_VIRT /* pCPUs this vCPU can be scheduled on. Pure copy of current->cpus_ptr */ cpumask_var_t sched_cpus; diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index 4a03e48011273db133c60358a22b886a65e4ed85..0c0ae56e8163ca5d0792bc53354c85a2ff12032f 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -44,6 +44,15 @@ typedef u64 kvm_pte_t; #define KVM_PHYS_INVALID (-1ULL) +#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R BIT(6) +#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W BIT(7) + +/* + * Used to indicate a pte for which a 'break-before-make' sequence is in + * progress. + */ +#define KVM_INVALID_PTE_LOCKED BIT(10) + static inline bool kvm_pte_valid(kvm_pte_t pte) { return pte & KVM_PTE_VALID; diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h new file mode 100644 index 0000000000000000000000000000000000000000..ac031f61897be4d008871d2b97fb48d3403b687a --- /dev/null +++ b/arch/arm64/include/asm/kvm_rme.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 ARM Ltd. + */ + +#ifndef __ASM_KVM_RME_H +#define __ASM_KVM_RME_H + +#include +#include + +/** + * enum realm_state - State of a Realm + */ +enum realm_state { + /** + * @REALM_STATE_NONE: + * Realm has not yet been created. rmi_realm_create() may be + * called to create the realm. + */ + REALM_STATE_NONE, + /** + * @REALM_STATE_NEW: + * Realm is under construction, not eligible for execution. Pages + * may be populated with rmi_data_create(). + */ + REALM_STATE_NEW, + /** + * @REALM_STATE_ACTIVE: + * Realm has been created and is eligible for execution with + * rmi_rec_enter(). Pages may no longer be populated with + * rmi_data_create(). + */ + REALM_STATE_ACTIVE, + /** + * @REALM_STATE_DYING: + * Realm is in the process of being destroyed or has already been + * destroyed. + */ + REALM_STATE_DYING, + /** + * @REALM_STATE_DEAD: + * Realm has been destroyed. + */ + REALM_STATE_DEAD +}; + +/** + * struct realm - Additional per VM data for a Realm + * + * @state: The lifetime state machine for the realm + * @rd: Kernel mapping of the Realm Descriptor (RD) + * @params: Parameters for the RMI_REALM_CREATE command + * @num_aux: The number of auxiliary pages required by the RMM + * @vmid: VMID to be used by the RMM for the realm + * @ia_bits: Number of valid Input Address bits in the IPA + * @pmu_enabled: PMU enabled in the realm + */ +struct realm { + enum realm_state state; + + void *rd; + struct realm_params *params; + + unsigned long num_aux; + unsigned int vmid; + unsigned int ia_bits; + bool pmu_enabled; +}; + +/** + * struct realm_rec - Additional per VCPU data for a Realm + * + * @mpidr: MPIDR (Multiprocessor Affinity Register) value to identify this VCPU + * @rec_page: Kernel VA of the RMM's private page for this REC + * @aux_pages: Additional pages private to the RMM for this REC + * @run: Kernel VA of the RmiRecRun structure shared with the RMM + */ +struct realm_rec { + unsigned long mpidr; + void *rec_page; + /* + * REC_PARAMS_AUX_GRANULES is the maximum number of granules that the + * RMM can require. By using that to size the array we know that it + * will be big enough as the page size is always at least as large as + * the granule size. 
In the case of a larger page size than 4k (or an + * RMM which requires fewer auxiliary granules), the array will be + * bigger than needed however the extra memory required is small and + * this keeps the code cleaner. + */ + struct page *aux_pages[REC_PARAMS_AUX_GRANULES]; + struct rec_run *run; +}; + +void kvm_init_rme(void); +u32 kvm_realm_ipa_limit(void); +u32 kvm_realm_vgic_nr_lr(void); +u32 kvm_realm_get_num_brps(void); +u32 kvm_realm_get_num_wrps(void); +unsigned int kvm_realm_sve_max_vl(void); + +u64 kvm_realm_reset_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val); + +bool kvm_rme_supports_sve(void); + +int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); +int kvm_init_realm_vm(struct kvm *kvm); +void kvm_destroy_realm(struct kvm *kvm); +void kvm_realm_destroy_rtts(struct kvm *kvm, u32 ia_bits); +int kvm_create_rec(struct kvm_vcpu *vcpu); +void kvm_destroy_rec(struct kvm_vcpu *vcpu); + +int kvm_rec_enter(struct kvm_vcpu *vcpu); +int handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_status); + +void kvm_realm_unmap_range(struct kvm *kvm, + unsigned long ipa, + unsigned long size, + bool unmap_private); +int realm_map_protected(struct realm *realm, + unsigned long base_ipa, + kvm_pfn_t pfn, + unsigned long size, + struct kvm_mmu_memory_cache *memcache); +int realm_map_non_secure(struct realm *realm, + unsigned long ipa, + kvm_pfn_t pfn, + unsigned long size, + struct kvm_mmu_memory_cache *memcache); +int realm_psci_complete(struct kvm_vcpu *source, + struct kvm_vcpu *target, + unsigned long status); + +static inline bool kvm_realm_is_private_address(struct realm *realm, + unsigned long addr) +{ + return !(addr & BIT(realm->ia_bits - 1)); +} + +#endif /* __ASM_KVM_RME_H */ diff --git a/arch/arm64/include/asm/rmi_cmds.h b/arch/arm64/include/asm/rmi_cmds.h new file mode 100644 index 0000000000000000000000000000000000000000..27cd2751f3bfa4c599c0498301ec3c8f611cdf1b --- /dev/null +++ b/arch/arm64/include/asm/rmi_cmds.h @@ -0,0 +1,508 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 ARM Ltd. + */ + +#ifndef __ASM_RMI_CMDS_H +#define __ASM_RMI_CMDS_H + +#include + +#include + +struct rtt_entry { + unsigned long walk_level; + unsigned long desc; + int state; + int ripas; +}; + +/** + * rmi_data_create() - Create a data granule + * @rd: PA of the RD + * @data: PA of the target granule + * @ipa: IPA at which the granule will be mapped in the guest + * @src: PA of the source granule + * @flags: RMI_MEASURE_CONTENT if the contents should be measured + * + * Create a new data granule, copying contents from a non-secure granule. 
+ * + * Return: RMI return code + */ +static inline int rmi_data_create(unsigned long rd, unsigned long data, + unsigned long ipa, unsigned long src, + unsigned long flags) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_DATA_CREATE, rd, data, ipa, src, + flags, &res); + + return res.a0; +} + +/** + * rmi_data_create_unknown() - Create a data granule with unknown contents + * @rd: PA of the RD + * @data: PA of the target granule + * @ipa: IPA at which the granule will be mapped in the guest + * + * Return: RMI return code + */ +static inline int rmi_data_create_unknown(unsigned long rd, + unsigned long data, + unsigned long ipa) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_DATA_CREATE_UNKNOWN, rd, data, ipa, &res); + + return res.a0; +} + +/** + * rmi_data_destroy() - Destroy a data granule + * @rd: PA of the RD + * @ipa: IPA at which the granule is mapped in the guest + * @data_out: PA of the granule which was destroyed + * @top_out: Top IPA of non-live RTT entries + * + * Unmap a protected IPA from stage 2, transitioning it to DESTROYED. + * The IPA cannot be used by the guest unless it is transitioned to RAM again + * by the realm guest. + * + * Return: RMI return code + */ +static inline int rmi_data_destroy(unsigned long rd, unsigned long ipa, + unsigned long *data_out, + unsigned long *top_out) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_DATA_DESTROY, rd, ipa, &res); + + if (data_out) + *data_out = res.a1; + if (top_out) + *top_out = res.a2; + + return res.a0; +} + +/** + * rmi_features() - Read feature register + * @index: Feature register index + * @out: Feature register value is written to this pointer + * + * Return: RMI return code + */ +static inline int rmi_features(unsigned long index, unsigned long *out) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_FEATURES, index, &res); + + if (out) + *out = res.a1; + return res.a0; +} + +/** + * rmi_granule_delegate() - Delegate a granule + * @phys: PA of the granule + * + * Delegate a granule for use by the realm world. + * + * Return: RMI return code + */ +static inline int rmi_granule_delegate(unsigned long phys) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_GRANULE_DELEGATE, phys, &res); + + return res.a0; +} + +/** + * rmi_granule_undelegate() - Undelegate a granule + * @phys: PA of the granule + * + * Undelegate a granule to allow use by the normal world. Will fail if the + * granule is in use. + * + * Return: RMI return code + */ +static inline int rmi_granule_undelegate(unsigned long phys) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_GRANULE_UNDELEGATE, phys, &res); + + return res.a0; +} + +/** + * rmi_psci_complete() - Complete pending PSCI command + * @calling_rec: PA of the calling REC + * @target_rec: PA of the target REC + * @status: Status of the PSCI request + * + * Completes a pending PSCI command which was called with an MPIDR argument, by + * providing the corresponding REC. + * + * Return: RMI return code + */ +static inline int rmi_psci_complete(unsigned long calling_rec, + unsigned long target_rec, + unsigned long status) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_PSCI_COMPLETE, calling_rec, target_rec, + status, &res); + + return res.a0; +} + +/** + * rmi_realm_activate() - Active a realm + * @rd: PA of the RD + * + * Mark a realm as Active signalling that creation is complete and allowing + * execution of the realm. 
+ * + * Return: RMI return code + */ +static inline int rmi_realm_activate(unsigned long rd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_REALM_ACTIVATE, rd, &res); + + return res.a0; +} + +/** + * rmi_realm_create() - Create a realm + * @rd: PA of the RD + * @params: PA of realm parameters + * + * Create a new realm using the given parameters. + * + * Return: RMI return code + */ +static inline int rmi_realm_create(unsigned long rd, unsigned long params) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_REALM_CREATE, rd, params, &res); + + return res.a0; +} + +/** + * rmi_realm_destroy() - Destroy a realm + * @rd: PA of the RD + * + * Destroys a realm, all objects belonging to the realm must be destroyed first. + * + * Return: RMI return code + */ +static inline int rmi_realm_destroy(unsigned long rd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_REALM_DESTROY, rd, &res); + + return res.a0; +} + +/** + * rmi_rec_aux_count() - Get number of auxiliary granules required + * @rd: PA of the RD + * @aux_count: Number of granules written to this pointer + * + * A REC may require extra auxiliary granules to be delegated for the RMM to + * store metadata (not visible to the normal world) in. This function provides + * the number of granules that are required. + * + * Return: RMI return code + */ +static inline int rmi_rec_aux_count(unsigned long rd, unsigned long *aux_count) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_REC_AUX_COUNT, rd, &res); + + if (aux_count) + *aux_count = res.a1; + return res.a0; +} + +/** + * rmi_rec_create() - Create a REC + * @rd: PA of the RD + * @rec: PA of the target REC + * @params: PA of REC parameters + * + * Create a REC using the parameters specified in the struct rec_params pointed + * to by @params. + * + * Return: RMI return code + */ +static inline int rmi_rec_create(unsigned long rd, unsigned long rec, + unsigned long params) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_REC_CREATE, rd, rec, params, &res); + + return res.a0; +} + +/** + * rmi_rec_destroy() - Destroy a REC + * @rec: PA of the target REC + * + * Destroys a REC. The REC must not be running. + * + * Return: RMI return code + */ +static inline int rmi_rec_destroy(unsigned long rec) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_REC_DESTROY, rec, &res); + + return res.a0; +} + +/** + * rmi_rec_enter() - Enter a REC + * @rec: PA of the target REC + * @run_ptr: PA of RecRun structure + * + * Starts (or continues) execution within a REC. + * + * Return: RMI return code + */ +static inline int rmi_rec_enter(unsigned long rec, unsigned long run_ptr) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_REC_ENTER, rec, run_ptr, &res); + + return res.a0; +} + +/** + * rmi_rtt_create() - Creates an RTT + * @rd: PA of the RD + * @rtt: PA of the target RTT + * @ipa: Base of the IPA range described by the RTT + * @level: Depth of the RTT within the tree + * + * Creates an RTT (Realm Translation Table) at the specified level for the + * translation of the specified address within the realm. 
+ * + * Return: RMI return code + */ +static inline int rmi_rtt_create(unsigned long rd, unsigned long rtt, + unsigned long ipa, long level) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_RTT_CREATE, rd, rtt, ipa, level, &res); + + return res.a0; +} + +/** + * rmi_rtt_destroy() - Destroy an RTT + * @rd: PA of the RD + * @ipa: Base of the IPA range described by the RTT + * @level: Depth of the RTT within the tree + * @out_rtt: Pointer to write the PA of the RTT which was destroyed + * @out_top: Pointer to write the top IPA of non-live RTT entries + * + * Destroys an RTT. The RTT must be non-live, i.e. none of the entries in the + * table are in ASSIGNED or TABLE state. + * + * Return: RMI return code. + */ +static inline int rmi_rtt_destroy(unsigned long rd, + unsigned long ipa, + long level, + unsigned long *out_rtt, + unsigned long *out_top) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_RTT_DESTROY, rd, ipa, level, &res); + + if (out_rtt) + *out_rtt = res.a1; + if (out_top) + *out_top = res.a2; + + return res.a0; +} + +/** + * rmi_rtt_fold() - Fold an RTT + * @rd: PA of the RD + * @ipa: Base of the IPA range described by the RTT + * @level: Depth of the RTT within the tree + * @out_rtt: Pointer to write the PA of the RTT which was destroyed + * + * Folds an RTT. If all entries with the RTT are 'homogeneous' the RTT can be + * folded into the parent and the RTT destroyed. + * + * Return: RMI return code + */ +static inline int rmi_rtt_fold(unsigned long rd, unsigned long ipa, + long level, unsigned long *out_rtt) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_RTT_FOLD, rd, ipa, level, &res); + + if (out_rtt) + *out_rtt = res.a1; + + return res.a0; +} + +/** + * rmi_rtt_init_ripas() - Set RIPAS for new realm + * @rd: PA of the RD + * @base: Base of target IPA region + * @top: Top of target IPA region + * @out_top: Top IPA of range whose RIPAS was modified + * + * Sets the RIPAS of a target IPA range to RAM, for a realm in the NEW state. + * + * Return: RMI return code + */ +static inline int rmi_rtt_init_ripas(unsigned long rd, unsigned long base, + unsigned long top, unsigned long *out_top) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_RTT_INIT_RIPAS, rd, base, top, &res); + + if (out_top) + *out_top = res.a1; + + return res.a0; +} + +/** + * rmi_rtt_map_unprotected() - Map NS granules into a realm + * @rd: PA of the RD + * @ipa: Base IPA of the mapping + * @level: Depth within the RTT tree + * @desc: RTTE descriptor + * + * Create a mapping from an Unprotected IPA to a Non-secure PA. + * + * Return: RMI return code + */ +static inline int rmi_rtt_map_unprotected(unsigned long rd, + unsigned long ipa, + long level, + unsigned long desc) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_RTT_MAP_UNPROTECTED, rd, ipa, level, + desc, &res); + + return res.a0; +} + +/** + * rmi_rtt_read_entry() - Read an RTTE + * @rd: PA of the RD + * @ipa: IPA for which to read the RTTE + * @level: RTT level at which to read the RTTE + * @rtt: Output structure describing the RTTE + * + * Reads a RTTE (Realm Translation Table Entry). 
+ * + * Return: RMI return code + */ +static inline int rmi_rtt_read_entry(unsigned long rd, unsigned long ipa, + long level, struct rtt_entry *rtt) +{ + struct arm_smccc_1_2_regs regs = { + SMC_RMI_RTT_READ_ENTRY, + rd, ipa, level + }; + + arm_smccc_1_2_smc(®s, ®s); + + rtt->walk_level = regs.a1; + rtt->state = regs.a2 & 0xFF; + rtt->desc = regs.a3; + rtt->ripas = regs.a4 & 0xFF; + + return regs.a0; +} + +/** + * rmi_rtt_set_ripas() - Set RIPAS for an running realm + * @rd: PA of the RD + * @rec: PA of the REC making the request + * @base: Base of target IPA region + * @top: Top of target IPA region + * @out_top: Pointer to write top IPA of range whose RIPAS was modified + * + * Completes a request made by the realm to change the RIPAS of a target IPA + * range. + * + * Return: RMI return code + */ +static inline int rmi_rtt_set_ripas(unsigned long rd, unsigned long rec, + unsigned long base, unsigned long top, + unsigned long *out_top) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_RTT_SET_RIPAS, rd, rec, base, top, &res); + + if (out_top) + *out_top = res.a1; + + return res.a0; +} + +/** + * rmi_rtt_unmap_unprotected() - Remove a NS mapping + * @rd: PA of the RD + * @ipa: Base IPA of the mapping + * @level: Depth within the RTT tree + * @out_top: Pointer to write top IPA of non-live RTT entries + * + * Removes a mapping at an Unprotected IPA. + * + * Return: RMI return code + */ +static inline int rmi_rtt_unmap_unprotected(unsigned long rd, + unsigned long ipa, + long level, + unsigned long *out_top) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(SMC_RMI_RTT_UNMAP_UNPROTECTED, rd, ipa, + level, &res); + + if (out_top) + *out_top = res.a1; + + return res.a0; +} + +#endif /* __ASM_RMI_CMDS_H */ diff --git a/arch/arm64/include/asm/rmi_smc.h b/arch/arm64/include/asm/rmi_smc.h new file mode 100644 index 0000000000000000000000000000000000000000..7a93a3e0ac6eb981c95468fd616d8c62a220f634 --- /dev/null +++ b/arch/arm64/include/asm/rmi_smc.h @@ -0,0 +1,259 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023-2024 ARM Ltd. 
+ * + * The values and structures in this file are from the Realm Management Monitor + * specification (DEN0137) version 1.0-rel0: + * https://developer.arm.com/documentation/den0137/1-0rel0/ + */ + +#ifndef __ASM_RMI_SMC_H +#define __ASM_RMI_SMC_H + +#include + +#define SMC_RMI_CALL(func) \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_STANDARD, \ + (func)) + +#define SMC_RMI_VERSION SMC_RMI_CALL(0x0150) +#define SMC_RMI_GRANULE_DELEGATE SMC_RMI_CALL(0x0151) +#define SMC_RMI_GRANULE_UNDELEGATE SMC_RMI_CALL(0x0152) +#define SMC_RMI_DATA_CREATE SMC_RMI_CALL(0x0153) +#define SMC_RMI_DATA_CREATE_UNKNOWN SMC_RMI_CALL(0x0154) +#define SMC_RMI_DATA_DESTROY SMC_RMI_CALL(0x0155) + +#define SMC_RMI_REALM_ACTIVATE SMC_RMI_CALL(0x0157) +#define SMC_RMI_REALM_CREATE SMC_RMI_CALL(0x0158) +#define SMC_RMI_REALM_DESTROY SMC_RMI_CALL(0x0159) +#define SMC_RMI_REC_CREATE SMC_RMI_CALL(0x015a) +#define SMC_RMI_REC_DESTROY SMC_RMI_CALL(0x015b) +#define SMC_RMI_REC_ENTER SMC_RMI_CALL(0x015c) +#define SMC_RMI_RTT_CREATE SMC_RMI_CALL(0x015d) +#define SMC_RMI_RTT_DESTROY SMC_RMI_CALL(0x015e) +#define SMC_RMI_RTT_MAP_UNPROTECTED SMC_RMI_CALL(0x015f) + +#define SMC_RMI_RTT_READ_ENTRY SMC_RMI_CALL(0x0161) +#define SMC_RMI_RTT_UNMAP_UNPROTECTED SMC_RMI_CALL(0x0162) + +#define SMC_RMI_PSCI_COMPLETE SMC_RMI_CALL(0x0164) +#define SMC_RMI_FEATURES SMC_RMI_CALL(0x0165) +#define SMC_RMI_RTT_FOLD SMC_RMI_CALL(0x0166) +#define SMC_RMI_REC_AUX_COUNT SMC_RMI_CALL(0x0167) +#define SMC_RMI_RTT_INIT_RIPAS SMC_RMI_CALL(0x0168) +#define SMC_RMI_RTT_SET_RIPAS SMC_RMI_CALL(0x0169) + +#define RMI_ABI_MAJOR_VERSION 1 +#define RMI_ABI_MINOR_VERSION 0 + +#define RMI_ABI_VERSION_GET_MAJOR(version) ((version) >> 16) +#define RMI_ABI_VERSION_GET_MINOR(version) ((version) & 0xFFFF) +#define RMI_ABI_VERSION(major, minor) (((major) << 16) | (minor)) + +#define RMI_UNASSIGNED 0 +#define RMI_ASSIGNED 1 +#define RMI_TABLE 2 + +#define RMI_RETURN_STATUS(ret) ((ret) & 0xFF) +#define RMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF) + +#define RMI_SUCCESS 0 +#define RMI_ERROR_INPUT 1 +#define RMI_ERROR_REALM 2 +#define RMI_ERROR_REC 3 +#define RMI_ERROR_RTT 4 + +enum rmi_ripas { + RMI_EMPTY = 0, + RMI_RAM = 1, + RMI_DESTROYED = 2, +}; + +#define RMI_NO_MEASURE_CONTENT 0 +#define RMI_MEASURE_CONTENT 1 + +#define RMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0) +#define RMI_FEATURE_REGISTER_0_LPA2 BIT(8) +#define RMI_FEATURE_REGISTER_0_SVE_EN BIT(9) +#define RMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10) +#define RMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(19, 14) +#define RMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(25, 20) +#define RMI_FEATURE_REGISTER_0_PMU_EN BIT(26) +#define RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(31, 27) +#define RMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(32) +#define RMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(33) +#define RMI_FEATURE_REGISTER_0_GICV3_NUM_LRS GENMASK(37, 34) +#define RMI_FEATURE_REGISTER_0_MAX_RECS_ORDER GENMASK(41, 38) +#define RMI_FEATURE_REGISTER_0_Reserved GENMASK(63, 42) + +#define RMI_REALM_PARAM_FLAG_LPA2 BIT(0) +#define RMI_REALM_PARAM_FLAG_SVE BIT(1) +#define RMI_REALM_PARAM_FLAG_PMU BIT(2) + +/* + * Note many of these fields are smaller than u64 but all fields have u64 + * alignment, so use u64 to ensure correct alignment. 
+ */ +struct realm_params { + union { /* 0x0 */ + struct { + u64 flags; + u64 s2sz; + u64 sve_vl; + u64 num_bps; + u64 num_wps; + u64 pmu_num_ctrs; + u64 hash_algo; + }; + u8 padding0[0x400]; + }; + union { /* 0x400 */ + u8 rpv[64]; + u8 padding1[0x400]; + }; + union { /* 0x800 */ + struct { + u64 vmid; + u64 rtt_base; + s64 rtt_level_start; + u64 rtt_num_start; + }; + u8 padding2[0x800]; + }; +}; + +/* + * The number of GPRs (starting from X0) that are + * configured by the host when a REC is created. + */ +#define REC_CREATE_NR_GPRS 8 + +#define REC_PARAMS_FLAG_RUNNABLE BIT_ULL(0) + +#define REC_PARAMS_AUX_GRANULES 16 + +struct rec_params { + union { /* 0x0 */ + u64 flags; + u8 padding0[0x100]; + }; + union { /* 0x100 */ + u64 mpidr; + u8 padding1[0x100]; + }; + union { /* 0x200 */ + u64 pc; + u8 padding2[0x100]; + }; + union { /* 0x300 */ + u64 gprs[REC_CREATE_NR_GPRS]; + u8 padding3[0x500]; + }; + union { /* 0x800 */ + struct { + u64 num_rec_aux; + u64 aux[REC_PARAMS_AUX_GRANULES]; + }; + u8 padding4[0x800]; + }; +}; + +#define REC_ENTER_FLAG_EMULATED_MMIO BIT(0) +#define REC_ENTER_FLAG_INJECT_SEA BIT(1) +#define REC_ENTER_FLAG_TRAP_WFI BIT(2) +#define REC_ENTER_FLAG_TRAP_WFE BIT(3) +#define REC_ENTER_FLAG_RIPAS_RESPONSE BIT(4) + +#define REC_RUN_GPRS 31 +#define REC_MAX_GIC_NUM_LRS 16 + +struct rec_enter { + union { /* 0x000 */ + u64 flags; + u8 padding0[0x200]; + }; + union { /* 0x200 */ + u64 gprs[REC_RUN_GPRS]; + u8 padding1[0x100]; + }; + union { /* 0x300 */ + struct { + u64 gicv3_hcr; + u64 gicv3_lrs[REC_MAX_GIC_NUM_LRS]; + }; + u8 padding2[0x100]; + }; + u8 padding3[0x400]; +}; + +#define RMI_EXIT_SYNC 0x00 +#define RMI_EXIT_IRQ 0x01 +#define RMI_EXIT_FIQ 0x02 +#define RMI_EXIT_PSCI 0x03 +#define RMI_EXIT_RIPAS_CHANGE 0x04 +#define RMI_EXIT_HOST_CALL 0x05 +#define RMI_EXIT_SERROR 0x06 + +struct rec_exit { + union { /* 0x000 */ + u8 exit_reason; + u8 padding0[0x100]; + }; + union { /* 0x100 */ + struct { + u64 esr; + u64 far; + u64 hpfar; + }; + u8 padding1[0x100]; + }; + union { /* 0x200 */ + u64 gprs[REC_RUN_GPRS]; + u8 padding2[0x100]; + }; + union { /* 0x300 */ + struct { + u64 gicv3_hcr; + u64 gicv3_lrs[REC_MAX_GIC_NUM_LRS]; + u64 gicv3_misr; + u64 gicv3_vmcr; + }; + u8 padding3[0x100]; + }; + union { /* 0x400 */ + struct { + u64 cntp_ctl; + u64 cntp_cval; + u64 cntv_ctl; + u64 cntv_cval; + }; + u8 padding4[0x100]; + }; + union { /* 0x500 */ + struct { + u64 ripas_base; + u64 ripas_top; + u64 ripas_value; + }; + u8 padding5[0x100]; + }; + union { /* 0x600 */ + u16 imm; + u8 padding6[0x100]; + }; + union { /* 0x700 */ + struct { + u8 pmu_ovf_status; + }; + u8 padding7[0x100]; + }; +}; + +struct rec_run { + struct rec_enter enter; + struct rec_exit exit; +}; + +#endif /* __ASM_RMI_SMC_H */ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 261d6e9df2e1009f8efab95d90dda611fcba1b3e..12cf36c3818904fb68c3ee19434c9a89054ed84b 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -81,6 +81,7 @@ void __hyp_reset_vectors(void); bool is_kvm_arm_initialised(void); DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized); +DECLARE_STATIC_KEY_FALSE(kvm_rme_is_available); /* Reports the availability of HYP mode */ static inline bool is_hyp_mode_available(void) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 196231712b10975b06e929d347525d68364bae57..922d2dce1b77b7385f2e3716e241541052f16914 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -110,7 +110,7 
@@ struct kvm_regs { #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ #define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ -#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */ +#define KVM_ARM_VCPU_REC 9 /* VCPU REC state as part of Realm */ struct kvm_vcpu_init { __u32 target; @@ -421,8 +421,67 @@ enum { #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 #define KVM_DEV_ARM_ITS_CTRL_RESET 4 -#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256 0 -#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512 1 +/* KVM_CAP_ARM_RME on VM fd */ +#define KVM_CAP_ARM_RME_CONFIG_REALM 0 +#define KVM_CAP_ARM_RME_CREATE_REALM 1 +#define KVM_CAP_ARM_RME_INIT_RIPAS_REALM 2 +#define KVM_CAP_ARM_RME_POPULATE_REALM 3 +#define KVM_CAP_ARM_RME_ACTIVATE_REALM 4 + +/* List of configuration items accepted for KVM_CAP_ARM_RME_CONFIG_REALM */ +#define ARM_RME_CONFIG_RPV 0 +#define ARM_RME_CONFIG_HASH_ALGO 1 + +#define ARM_RME_CONFIG_MEASUREMENT_ALGO_SHA256 0 +#define ARM_RME_CONFIG_MEASUREMENT_ALGO_SHA512 1 + +#define ARM_RME_CONFIG_RPV_SIZE 64 + +#define KVM_CAP_ARM_RME_CFG_DBG 3 +#define KVM_CAP_ARM_RME_CFG_PMU 4 + +struct arm_rme_config { + __u32 cfg; + union { + /* cfg == ARM_RME_CONFIG_RPV */ + struct { + __u8 rpv[ARM_RME_CONFIG_RPV_SIZE]; + }; + + /* cfg == ARM_RME_CONFIG_HASH_ALGO */ + struct { + __u32 hash_algo; + }; + + /* cfg == KVM_CAP_ARM_RME_CFG_DBG */ + struct { + __u32 num_brps; + __u32 num_wrps; + }; + + /* cfg == KVM_CAP_ARM_RME_CFG_PMU */ + struct { + __u32 num_pmu_cntrs; + }; + + /* Fix the size of the union */ + __u8 reserved[256]; + }; +}; + +#define KVM_ARM_RME_POPULATE_FLAGS_MEASURE (1 << 0) +struct arm_rme_populate_realm { + __u64 base; + __u64 size; + __u32 flags; + __u32 reserved[3]; +}; + +struct arm_rme_init_ripas { + __u64 base; + __u64 size; + __u64 reserved[2]; +}; /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index eadf41417ffa6d14adafadc342b129af6395570e..dacae31420058e2ecb64f3aebb0422d2c63ac9c1 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -20,13 +20,11 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o pvsched.o \ vgic/vgic-v3.o vgic/vgic-v4.o \ vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \ vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \ - vgic/vgic-its.o vgic/vgic-debug.o + vgic/vgic-its.o vgic/vgic-debug.o \ + rme.o rme-exit.o kvm-$(CONFIG_VIRT_PLAT_DEV) += vgic/shadow_dev.o kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o -kvm-$(CONFIG_HISI_VIRTCCA_HOST) += tmi.o -kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm.o -kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm_exit.o obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/ always-y := hyp_constants.h hyp-constants.s diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 27032290094d62f86dd9ba7e17f2b11e8f131241..6d18a4f14eac11e11d3638e2cd7961b24bd94aac 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -243,6 +243,13 @@ static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu) static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) { + struct kvm_vcpu *vcpu = ctxt->vcpu; + + if (kvm_is_realm(vcpu->kvm)) { + WARN_ON(offset); + return; + } + #ifdef CONFIG_HISI_VIRTCCA_HOST if (kvm_is_virtcca_cvm(ctxt->vcpu->kvm)) return; @@ -545,6 +552,21 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, } } +void 
kvm_realm_timers_update(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu; + int i; + + for (i = 0; i < NR_KVM_EL0_TIMERS; i++) { + struct arch_timer_context *timer = &arch_timer->timers[i]; + bool status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT; + bool level = kvm_timer_irq_can_fire(timer) && status; + + if (level != timer->irq.level) + kvm_timer_update_irq(vcpu, level, timer); + } +} + /* Only called for a fully emulated timer */ static void timer_emulate(struct arch_timer_context *ctx) { @@ -960,6 +982,8 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) if (unlikely(!timer->enabled)) return; + kvm_timer_unblocking(vcpu); + get_timer_map(vcpu, &map); #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS @@ -988,7 +1012,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) if (static_branch_likely(&has_gic_active_state) && map.direct_ptimer) kvm_timer_vcpu_load_gic(map.direct_ptimer); #endif - kvm_timer_unblocking(vcpu); timer_restore_state(map.direct_vtimer); @@ -1201,7 +1224,9 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) ctxt->vcpu = vcpu; - if (timerid == TIMER_VTIMER) + if (kvm_is_realm(vcpu->kvm)) + ctxt->offset.vm_offset = NULL; + else if (timerid == TIMER_VTIMER) ctxt->offset.vm_offset = &kvm->arch.timer_data.voffset; else ctxt->offset.vm_offset = &kvm->arch.timer_data.poffset; @@ -1224,13 +1249,19 @@ static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); + u64 cntvoff; for (int i = 0; i < NR_KVM_TIMERS; i++) timer_context_init(vcpu, i); + if (kvm_is_realm(vcpu->kvm)) + cntvoff = 0; + else + cntvoff = kvm_phys_timer_read(); + /* Synchronize offsets across timers of a VM if not already provided */ if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags)) { - timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read()); + timer_set_offset(vcpu_vtimer(vcpu), cntvoff); timer_set_offset(vcpu_ptimer(vcpu), 0); } @@ -1856,6 +1887,13 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) return 0; #endif + /* + * We don't use mapped IRQs for Realms because the RMI doesn't allow + * us setting the LR.HW bit in the VGIC. 
+ */ + if (vcpu_is_rec(vcpu)) + return 0; + get_timer_map(vcpu, &map); #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS @@ -1994,6 +2032,9 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm, if (offset->reserved) return -EINVAL; + if (kvm_is_realm(kvm)) + return -EINVAL; + mutex_lock(&kvm->lock); if (lock_all_vcpus(kvm)) { diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 7eb8eada1ef293c3e6334c23197816fae8536f03..bfddc748a2b688da63c9c805f8ce6b6b108f7982 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -39,10 +40,8 @@ #include #include #include +#include #include -#include -#include -#include #include #include @@ -52,6 +51,8 @@ static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT; #include "hisilicon/hisi_virt.h" +DEFINE_STATIC_KEY_FALSE(kvm_rme_is_available); + DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector); DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); @@ -250,13 +251,6 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->slots_lock); break; -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_CAP_ARM_TMM: - r = 0; - if (static_branch_unlikely(&virtcca_cvm_is_available)) - r = kvm_cvm_enable_cap(kvm, cap); - break; -#endif #ifdef CONFIG_ARM64_HDBSS case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: r = kvm_cap_arm_enable_hdbss(kvm, cap); @@ -267,6 +261,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, r = kvm_hisi_ipiv_enable_cap(kvm, cap); break; #endif + case KVM_CAP_ARM_RME: + mutex_lock(&kvm->lock); + r = kvm_realm_enable_cap(kvm, cap); + mutex_unlock(&kvm->lock); + break; default: r = -EINVAL; break; @@ -288,14 +287,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm_arm_cvm_type(type)) { - ret = kvm_enable_virtcca_cvm(kvm); - if (ret) - return ret; - } -#endif - ret = kvm_sched_affinity_vm_init(kvm); if (ret) return ret; @@ -310,6 +301,21 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) mutex_unlock(&kvm->lock); #endif + if (type & ~(KVM_VM_TYPE_ARM_MASK | KVM_VM_TYPE_ARM_IPA_SIZE_MASK)) + return -EINVAL; + + switch (type & KVM_VM_TYPE_ARM_MASK) { + case KVM_VM_TYPE_ARM_NORMAL: + break; + case KVM_VM_TYPE_ARM_REALM: + if (!static_branch_unlikely(&kvm_rme_is_available)) + return -EPERM; + kvm->arch.is_realm = true; + break; + default: + return -EINVAL; + } + ret = kvm_share_hyp(kvm, kvm + 1); if (ret) return ret; @@ -341,21 +347,16 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) kvm_arm_init_hypercalls(kvm); bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); - -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm_arm_cvm_type(type)) { - ret = kvm_init_cvm_vm(kvm); + + /* Initialise the realm bits after the generic bits are enabled */ + if (kvm_is_realm(kvm)) { + ret = kvm_init_realm_vm(kvm); if (ret) - goto out_free_stage2_pgd; + goto err_free_cpumask; } -#endif return 0; -#ifdef CONFIG_HISI_VIRTCCA_HOST -out_free_stage2_pgd: - kvm_free_stage2_pgd(&kvm->arch.mmu); -#endif err_free_cpumask: free_cpumask_var(kvm->arch.supported_cpus); err_unshare_kvm: @@ -390,10 +391,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_unshare_hyp(kvm, kvm + 1); kvm_arm_teardown_hypercalls(kvm); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm_is_virtcca_cvm(kvm)) - kvm_destroy_cvm(kvm); -#endif + kvm_destroy_realm(kvm); } #ifdef CONFIG_ARM64_HISI_IPIV @@ -415,21 +413,23 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ONE_REG: case KVM_CAP_ARM_PSCI: case KVM_CAP_ARM_PSCI_0_2: - case 
KVM_CAP_READONLY_MEM: case KVM_CAP_MP_STATE: case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2: case KVM_CAP_ARM_NISV_TO_USER: case KVM_CAP_ARM_INJECT_EXT_DABT: - case KVM_CAP_SET_GUEST_DEBUG: case KVM_CAP_VCPU_ATTRIBUTES: case KVM_CAP_PTP_KVM: case KVM_CAP_ARM_SYSTEM_SUSPEND: case KVM_CAP_IRQFD_RESAMPLE: - case KVM_CAP_COUNTER_OFFSET: r = 1; break; + case KVM_CAP_COUNTER_OFFSET: + case KVM_CAP_READONLY_MEM: + case KVM_CAP_SET_GUEST_DEBUG: + r = !kvm_is_realm(kvm); + break; case KVM_CAP_SET_GUEST_DEBUG2: return KVM_GUESTDBG_VALID_MASK; case KVM_CAP_ARM_SET_DEVICE_ADDR: @@ -469,21 +469,19 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = system_supports_mte(); break; case KVM_CAP_STEAL_TIME: -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm && kvm_is_virtcca_cvm(kvm)) + if (kvm_is_realm(kvm)) r = 0; else -#endif r = kvm_arm_pvtime_supported(); break; case KVM_CAP_ARM_EL1_32BIT: r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1); break; case KVM_CAP_GUEST_DEBUG_HW_BPS: - r = get_num_brps(); + r = kvm_is_realm(kvm) ? 0 : get_num_brps(); break; case KVM_CAP_GUEST_DEBUG_HW_WPS: - r = get_num_wrps(); + r = kvm_is_realm(kvm) ? 0 : get_num_wrps(); break; case KVM_CAP_ARM_PMU_V3: r = kvm_arm_support_pmu_v3(); @@ -495,7 +493,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = get_kvm_ipa_limit(); break; case KVM_CAP_ARM_SVE: - r = system_supports_sve(); + if (kvm_is_realm(kvm)) + r = kvm_rme_supports_sve(); + else + r = system_supports_sve(); break; case KVM_CAP_ARM_PTRAUTH_ADDRESS: case KVM_CAP_ARM_PTRAUTH_GENERIC: @@ -515,15 +516,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sdev_enable; break; #endif -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_CAP_ARM_TMM: - if (!is_armv8_4_sel2_present()) { - r = -ENXIO; - break; - } - r = static_key_enabled(&virtcca_cvm_is_available); - break; -#endif #ifdef CONFIG_ARM64_HDBSS case KVM_CAP_ARM_HW_DIRTY_STATE_TRACK: r = system_supports_hdbss(); @@ -537,6 +529,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 0; break; #endif + case KVM_CAP_ARM_RME: + r = static_key_enabled(&kvm_rme_is_available); + break; default: r = 0; } @@ -589,6 +584,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) vcpu_clear_flag(vcpu, VCPU_INITIALIZED); bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); + vcpu->arch.rec.mpidr = INVALID_HWID; + vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; /* Set up the timer */ @@ -680,27 +677,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu->cpu = cpu; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - if (single_task_running()) - vcpu_clear_wfx_traps(vcpu); - else - vcpu_set_wfx_traps(vcpu); - } -#endif kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) - kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); - return; - } -#endif - if (has_vhe()) - kvm_vcpu_load_sysregs_vhe(vcpu); - kvm_arch_vcpu_load_fp(vcpu); - kvm_vcpu_pmu_restore_guest(vcpu); if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); @@ -716,6 +694,15 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu_ptrauth_disable(vcpu); kvm_arch_vcpu_load_debug_state_flags(vcpu); + /* No additional state needs to be loaded on Realmed VMs */ + if (vcpu_is_rec(vcpu)) + return; + + if (has_vhe()) + kvm_vcpu_load_sysregs_vhe(vcpu); + kvm_arch_vcpu_load_fp(vcpu); + kvm_vcpu_pmu_restore_guest(vcpu); + if 
(!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) vcpu_set_on_unsupported_cpu(vcpu); @@ -727,23 +714,23 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - kvm_cvm_vcpu_put(vcpu); + kvm_timer_vcpu_put(vcpu); + kvm_vgic_put(vcpu); + + vcpu->cpu = -1; + + if (vcpu_is_rec(vcpu)) return; - } -#endif + kvm_arch_vcpu_put_debug_state_flags(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) kvm_vcpu_put_sysregs_vhe(vcpu); - kvm_timer_vcpu_put(vcpu); - kvm_vgic_put(vcpu); + kvm_vcpu_pmu_restore_host(vcpu); kvm_arm_vmid_clear_active(); vcpu_clear_on_unsupported_cpu(vcpu); - vcpu->cpu = -1; kvm_tlbi_dvmbm_vcpu_put(vcpu); @@ -910,6 +897,11 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) if (kvm_vm_is_protected(kvm)) kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu); + if (!irqchip_in_kernel(kvm) && kvm_is_realm(vcpu->kvm)) { + /* Userspace irqchip not yet supported with Realms */ + return -EOPNOTSUPP; + } + mutex_lock(&kvm->arch.config_lock); set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); mutex_unlock(&kvm->arch.config_lock); @@ -1162,18 +1154,6 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret) xfer_to_guest_mode_work_pending(); } -#ifdef CONFIG_HISI_VIRTCCA_HOST -static inline void update_pmu_phys_irq(struct kvm_vcpu *vcpu, bool *pmu_stopped) -{ - struct kvm_pmu *pmu = &vcpu->arch.pmu; - - if (pmu->irq_level) { - *pmu_stopped = true; - arm_pmu_set_phys_irq(false); - } -} -#endif - /* * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while * the vCPU is running. @@ -1226,9 +1206,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) run->exit_reason = KVM_EXIT_UNKNOWN; run->flags = 0; while (ret > 0) { -#ifdef CONFIG_HISI_VIRTCCA_HOST bool pmu_stopped = false; -#endif + /* * Check conditions before entering the guest */ @@ -1256,10 +1235,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid); kvm_pmu_flush_hwstate(vcpu); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - update_pmu_phys_irq(vcpu, &pmu_stopped); -#endif + + if (vcpu_is_rec(vcpu)) { + struct kvm_pmu *pmu = &vcpu->arch.pmu; + + if (pmu->irq_level) { + pmu_stopped = true; + arm_pmu_set_phys_irq(false); + } + } local_irq_disable(); @@ -1298,11 +1282,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) trace_kvm_entry(*vcpu_pc(vcpu)); guest_timing_enter_irqoff(); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - ret = kvm_tec_enter(vcpu); + if (vcpu_is_rec(vcpu)) + ret = kvm_rec_enter(vcpu); else -#endif ret = kvm_arm_vcpu_enter_exit(vcpu); vcpu->mode = OUTSIDE_GUEST_MODE; @@ -1357,23 +1339,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) local_irq_enable(); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (!vcpu_is_tec(vcpu)) { -#endif - trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); + /* Exit types that need handling before we can be preempted */ + if (!vcpu_is_rec(vcpu)) { + trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), + *vcpu_pc(vcpu)); - /* Exit types that need handling before we can be preempted */ handle_exit_early(vcpu, ret); - -#ifdef CONFIG_HISI_VIRTCCA_HOST } -#endif + preempt_enable(); -#ifdef CONFIG_HISI_VIRTCCA_HOST if (pmu_stopped) arm_pmu_set_phys_irq(true); -#endif + /* * The ARMv8 architecture doesn't give the hypervisor * a mechanism to prevent a guest from dropping to AArch32 EL0 @@ -1393,11 +1371,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu 
*vcpu) ret = ARM_EXCEPTION_IL; } -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - ret = handle_cvm_exit(vcpu, ret); + if (vcpu_is_rec(vcpu)) + ret = handle_rec_exit(vcpu, ret); else -#endif ret = handle_exit(vcpu, ret); #ifdef CONFIG_ARCH_VCPU_STAT update_vcpu_stat_time(&vcpu->stat); @@ -1522,6 +1498,19 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, return -EINVAL; } +static unsigned long system_supported_vcpu_features(struct kvm *kvm) +{ + unsigned long features = KVM_VCPU_VALID_FEATURES; + + if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1)) + clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features); + + if (!kvm_is_realm(kvm)) + clear_bit(KVM_ARM_VCPU_REC, &features); + + return features; +} + static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init) { @@ -1536,12 +1525,12 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, return -ENOENT; } + if (features & ~system_supported_vcpu_features(vcpu->kvm)) + return -EINVAL; + if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features)) return 0; - if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) - return -EINVAL; - /* MTE is incompatible with AArch32 */ if (kvm_has_mte(vcpu->kvm)) return -EINVAL; @@ -1560,6 +1549,10 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, } #endif + /* RME is incompatible with AArch32 */ + if (test_bit(KVM_ARM_VCPU_REC, &features)) + return -EINVAL; + return 0; } @@ -1747,6 +1740,22 @@ static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, return __kvm_arm_vcpu_set_events(vcpu, events); } +static int kvm_arm_vcpu_rmm_psci_complete(struct kvm_vcpu *vcpu, + struct kvm_arm_rmm_psci_complete *arg) +{ + struct kvm_vcpu *target = kvm_mpidr_to_vcpu(vcpu->kvm, arg->target_mpidr); + + if (!target) + return -EINVAL; + + /* + * RMM v1.0 only supports PSCI_RET_SUCCESS or PSCI_RET_DENIED + * for the status. But, let us leave it to the RMM to filter + * for making this future proof. 
+ */ + return realm_psci_complete(vcpu, target, arg->psci_status); +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { @@ -1801,10 +1810,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, if (unlikely(!kvm_vcpu_initialized(vcpu))) break; - r = -EPERM; - if (!kvm_arm_vcpu_is_finalized(vcpu)) - break; - r = -EFAULT; if (copy_from_user(®_list, user_list, sizeof(reg_list))) break; @@ -1869,6 +1874,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return kvm_arm_vcpu_finalize(vcpu, what); } + case KVM_ARM_VCPU_RMM_PSCI_COMPLETE: { + struct kvm_arm_rmm_psci_complete req; + + if (!vcpu_is_rec(vcpu)) + return -EPERM; + if (copy_from_user(&req, argp, sizeof(req))) + return -EFAULT; + return kvm_arm_vcpu_rmm_psci_complete(vcpu, &req); + } default: r = -EINVAL; } @@ -1933,11 +1947,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_device_attr attr; switch (ioctl) { -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_LOAD_USER_DATA: { - return kvm_load_user_data(kvm, arg); - } -#endif case KVM_CREATE_IRQCHIP: { int ret; if (!vgic_present) @@ -2824,13 +2833,8 @@ static __init int kvm_arm_init(void) in_hyp_mode = is_kernel_in_hyp_mode(); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (is_virtcca_cvm_enable() && in_hyp_mode) { - err = kvm_init_tmm(); - if (err) - return err; - } -#endif + if (in_hyp_mode) + kvm_init_rme(); if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || cpus_have_final_cap(ARM64_WORKAROUND_1508412)) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 105f4e00ec8b375ed1f9e37ddbefec43f3026a35..f716298765d3befcbd913fa4fb1096fb5a40640a 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -26,7 +26,6 @@ #include #include #include -#include #include "trace.h" @@ -112,6 +111,24 @@ static u64 core_reg_offset_from_id(u64 id) return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); } +static bool kvm_realm_validate_core_reg(u64 off) +{ + /* + * Note that GPRs can only sometimes be controlled by the VMM. + * For PSCI only X0-X6 are used, higher registers are ignored (restored + * from the REC). + * For HOST_CALL all of X0-X30 are copied to the RsiHostCall structure. + * For emulated MMIO X0 is always used. + */ + switch (off) { + case KVM_REG_ARM_CORE_REG(regs.regs[0]) ... + KVM_REG_ARM_CORE_REG(regs.regs[30]): + case KVM_REG_ARM_CORE_REG(regs.pc): + return true; + } + return false; +} + static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off) { int size; @@ -381,7 +398,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if (!vcpu_has_sve(vcpu)) return -ENOENT; - if (kvm_arm_vcpu_sve_finalized(vcpu)) + if (kvm_arm_vcpu_sve_finalized(vcpu) || kvm_realm_is_created(vcpu->kvm)) return -EPERM; /* too late! */ if (WARN_ON(vcpu->arch.sve_state)) @@ -395,7 +412,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if (vq_present(vqs, vq)) max_vq = vq; - if (max_vq > sve_vq_from_vl(kvm_sve_max_vl)) + if (max_vq > sve_vq_from_vl(kvm_sve_get_max_vl(vcpu->kvm))) return -EINVAL; /* @@ -639,8 +656,6 @@ static const u64 timer_reg_list[] = { KVM_REG_ARM_PTIMER_CVAL, }; -#define NUM_TIMER_REGS ARRAY_SIZE(timer_reg_list) - static bool is_timer_reg(u64 index) { switch (index) { @@ -655,9 +670,14 @@ static bool is_timer_reg(u64 index) return false; } +static unsigned long num_timer_regs(struct kvm_vcpu *vcpu) +{ + return kvm_is_realm(vcpu->kvm) ? 
0 : ARRAY_SIZE(timer_reg_list); +} + static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { - for (int i = 0; i < NUM_TIMER_REGS; i++) { + for (int i = 0; i < num_timer_regs(vcpu); i++) { if (put_user(timer_reg_list[i], uindices)) return -EFAULT; uindices++; @@ -692,11 +712,11 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu) { const unsigned int slices = vcpu_sve_slices(vcpu); - if (!vcpu_has_sve(vcpu)) + if (!vcpu_has_sve(vcpu) || !kvm_arm_vcpu_sve_finalized(vcpu)) return 0; - /* Policed by KVM_GET_REG_LIST: */ - WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); + if (kvm_is_realm(vcpu->kvm)) + return 1; /* KVM_REG_ARM64_SVE_VLS */ return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */) + 1; /* KVM_REG_ARM64_SVE_VLS */ @@ -713,8 +733,8 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu, if (!vcpu_has_sve(vcpu)) return 0; - /* Policed by KVM_GET_REG_LIST: */ - WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); + if (!kvm_arm_vcpu_sve_finalized(vcpu)) + return -EPERM; /* * Enumerate this first, so that userspace can save/restore in @@ -725,6 +745,9 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu, return -EFAULT; ++num_regs; + if (kvm_is_realm(vcpu->kvm)) + return num_regs; + for (i = 0; i < slices; i++) { for (n = 0; n < SVE_NUM_ZREGS; n++) { reg = KVM_REG_ARM64_SVE_ZREG(n, i); @@ -762,7 +785,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) res += num_sve_regs(vcpu); res += kvm_arm_num_sys_reg_descs(vcpu); res += kvm_arm_get_fw_num_regs(vcpu); - res += NUM_TIMER_REGS; + res += num_timer_regs(vcpu); return res; } @@ -794,7 +817,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) ret = copy_timer_indices(vcpu, uindices); if (ret < 0) return ret; - uindices += NUM_TIMER_REGS; + uindices += num_timer_regs(vcpu); return kvm_arm_copy_sys_reg_indices(vcpu, uindices); } @@ -819,12 +842,39 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) return kvm_arm_sys_reg_get_reg(vcpu, reg); } +/* + * The RMI ABI only enables setting some GPRs and PC. The selection of GPRs + * that are available depends on the Realm state and the reason for the last + * exit. All other registers are reset to architectural or otherwise defined + * reset values by the RMM, except for a few configuration fields that + * correspond to Realm parameters. 
+ */ +static bool validate_realm_set_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) { + u64 off = core_reg_offset_from_id(reg->id); + + return kvm_realm_validate_core_reg(off); + } else { + switch (reg->id) { + case KVM_REG_ARM64_SVE_VLS: + return true; + } + } + + return false; +} + int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { /* We currently use nothing arch-specific in upper 32 bits */ if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) return -EINVAL; + if (kvm_is_realm(vcpu->kvm) && !validate_realm_set_reg(vcpu, reg)) + return -EINVAL; + switch (reg->id & KVM_REG_ARM_COPROC_MASK) { case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg); case KVM_REG_ARM_FW: @@ -876,10 +926,30 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, bool has_esr = events->exception.serror_has_esr; bool ext_dabt_pending = events->exception.ext_dabt_pending; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending); -#endif + if (vcpu_is_rec(vcpu)) { + /* Cannot inject SError into a Realm. */ + if (serror_pending) + return -EINVAL; + + /* + * If a data abort is pending, set the flag and let the RMM + * inject an SEA when the REC is scheduled to be run. + */ + if (ext_dabt_pending) { + /* + * Can only inject SEA into a Realm if the previous exit + * was due to a data abort of an Unprotected IPA. + */ + if (!(vcpu->arch.rec.run->enter.flags & REC_ENTER_FLAG_EMULATED_MMIO)) + return -EINVAL; + + vcpu->arch.rec.run->enter.flags &= ~REC_ENTER_FLAG_EMULATED_MMIO; + vcpu->arch.rec.run->enter.flags |= REC_ENTER_FLAG_INJECT_SEA; + } + + return 0; + } + if (serror_pending && has_esr) { if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) return -EINVAL; diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index f02c7b53c922b9198b1b2833ae51f6d1bb412e1a..02ea4d10a12870d4a0fbbef43bb630ed4d4045a8 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -444,14 +444,14 @@ void kvm_arm_teardown_hypercalls(struct kvm *kvm) int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) { - return ARRAY_SIZE(kvm_arm_fw_reg_ids); + return kvm_is_realm(vcpu->kvm) ? 
0 : ARRAY_SIZE(kvm_arm_fw_reg_ids); } int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { int i; - for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) { + for (i = 0; i < kvm_arm_get_fw_num_regs(vcpu); i++) { if (put_user(kvm_arm_fw_reg_ids[i], uindices++)) return -EFAULT; } diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 0bd93a5f21ce382506803d018ba7df7072ac0aa1..50f434af083822273a45524e993fc569940c791c 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -165,7 +165,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr) */ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) { - if (vcpu_el1_is_32bit(vcpu)) + if (unlikely(vcpu_is_rec(vcpu))) + vcpu->arch.rec.run->enter.flags |= REC_ENTER_FLAG_INJECT_SEA; + else if (vcpu_el1_is_32bit(vcpu)) inject_abt32(vcpu, false, addr); else inject_abt64(vcpu, false, addr); @@ -224,6 +226,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu) */ void kvm_inject_undefined(struct kvm_vcpu *vcpu) { + WARN(vcpu_is_rec(vcpu), "Unexpected undefined exception injection to REC"); if (vcpu_el1_is_32bit(vcpu)) inject_undef32(vcpu); else diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index 9abea8e352408eed118e0c0b347c9084ce2322c0..ac214bd9dbc26ec268b1b7b24031174d06bcb473 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -6,7 +6,7 @@ #include #include -#include +#include #include #include "trace.h" @@ -137,20 +137,21 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, &data); data = vcpu_data_host_to_guest(vcpu, data, len); - vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)-> - tec_entry.gprs[0] = data; - } -#endif + + if (vcpu_is_rec(vcpu)) + vcpu->arch.rec.run->enter.gprs[0] = data; + else + vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data); } /* * The MMIO instruction is emulated and should not be re-executed * in the guest. */ - kvm_incr_pc(vcpu); + if (vcpu_is_rec(vcpu)) + vcpu->arch.rec.run->enter.flags |= REC_ENTER_FLAG_EMULATED_MMIO; + else + kvm_incr_pc(vcpu); return 1; } @@ -170,6 +171,11 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) * volunteered to do so, and bail out otherwise. */ if (!kvm_vcpu_dabt_isvalid(vcpu)) { + if (vcpu_is_rec(vcpu)) { + kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); + return 1; + } + if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER, &vcpu->kvm->arch.flags)) { run->exit_reason = KVM_EXIT_ARM_NISV; @@ -213,12 +219,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) run->mmio.phys_addr = fault_ipa; run->mmio.len = len; vcpu->mmio_needed = 1; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags |= - TEC_ENTRY_FLAG_EMUL_MMIO; - } -#endif + if (!ret) { /* We handled the access successfully in the kernel. 
*/ if (!is_write) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 3830aa0b07a0b8b6502880e8ed25c3154f9cc19f..b0dc3489391ade5a1f9ff2c1562212a32d665c63 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -312,6 +312,7 @@ static void invalidate_icache_guest_page(void *va, size_t size) * @start: The intermediate physical base address of the range to unmap * @size: The size of the area to unmap * @may_block: Whether or not we are permitted to block + * @only_shared: If true then protected mappings should not be unmapped * * Clear a range of stage-2 mappings, lowering the various ref-counts. Must * be called while holding mmu_lock (unless for freeing the stage2 pgd before @@ -319,20 +320,26 @@ static void invalidate_icache_guest_page(void *va, size_t size) * with things behind our backs. */ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, - bool may_block) + bool may_block, bool only_shared) { struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); phys_addr_t end = start + size; lockdep_assert_held_write(&kvm->mmu_lock); WARN_ON(size & ~PAGE_MASK); - WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap, - may_block)); + + if (kvm_is_realm(kvm)) + kvm_realm_unmap_range(kvm, start, size, !only_shared); + else + WARN_ON(stage2_apply_range(mmu, start, end, + kvm_pgtable_stage2_unmap, + may_block)); } -static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) +static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, + bool may_block) { - __unmap_stage2_range(mmu, start, size, true); + __unmap_stage2_range(mmu, start, size, may_block, false); } static void stage2_flush_memslot(struct kvm *kvm, @@ -341,7 +348,11 @@ static void stage2_flush_memslot(struct kvm *kvm, phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; phys_addr_t end = addr + PAGE_SIZE * memslot->npages; - stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush); + if (kvm_is_realm(kvm)) + kvm_realm_unmap_range(kvm, addr, end - addr, false); + else + stage2_apply_range_resched(&kvm->arch.mmu, addr, end, + kvm_pgtable_stage2_flush); } /** @@ -855,30 +866,15 @@ static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { .icache_inval_pou = invalidate_icache_guest_page, }; -/** - * kvm_init_stage2_mmu - Initialise a S2 MMU structure - * @kvm: The pointer to the KVM structure - * @mmu: The pointer to the s2 MMU structure - * @type: The machine type of the virtual machine - * - * Allocates only the stage-2 HW PGD level table(s). - * Note we don't need locking here as this is only called when the VM is - * created, which can only be done once. 
- */ -int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type) +static int kvm_init_ipa_range(struct kvm *kvm, + struct kvm_s2_mmu *mmu, unsigned long type) { u32 kvm_ipa_limit = get_kvm_ipa_limit(); - int cpu, err; - struct kvm_pgtable *pgt; u64 mmfr0, mmfr1; u32 phys_shift; -#ifdef CONFIG_HISI_VIRTCCA_CODA - if ((type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) && (!kvm_is_virtcca_cvm(kvm))) -#else - if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) -#endif - return -EINVAL; + if (kvm_is_realm(kvm)) + kvm_ipa_limit = kvm_realm_ipa_limit(); phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); if (is_protected_kvm_enabled()) { @@ -900,11 +896,33 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift); + return 0; +} + +/** + * kvm_init_stage2_mmu - Initialise a S2 MMU structure + * @kvm: The pointer to the KVM structure + * @mmu: The pointer to the s2 MMU structure + * @type: The machine type of the virtual machine + * + * Allocates only the stage-2 HW PGD level table(s). + * Note we don't need locking here as this is only called when the VM is + * created, which can only be done once. + */ +int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type) +{ + int cpu, err; + struct kvm_pgtable *pgt; + if (mmu->pgt != NULL) { kvm_err("kvm_arch already initialized?\n"); return -EINVAL; } + err = kvm_init_ipa_range(kvm, mmu, type); + if (err) + return err; + pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT); if (!pgt) return -ENOMEM; @@ -980,7 +998,8 @@ static void stage2_unmap_memslot(struct kvm *kvm, if (!(vma->vm_flags & VM_PFNMAP)) { gpa_t gpa = addr + (vm_start - memslot->userspace_addr); - unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start); + unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start, + true); } hva = vm_end; } while (hva < reg_end); @@ -999,6 +1018,10 @@ void stage2_unmap_vm(struct kvm *kvm) struct kvm_memory_slot *memslot; int idx, bkt; + /* For realms this is handled by the RMM so nothing to do here */ + if (kvm_is_realm(kvm)) + return; + idx = srcu_read_lock(&kvm->srcu); mmap_read_lock(current->mm); write_lock(&kvm->mmu_lock); @@ -1015,10 +1038,25 @@ void stage2_unmap_vm(struct kvm *kvm) void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) { struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); - struct kvm_pgtable *pgt = NULL; + struct kvm_pgtable *pgt; write_lock(&kvm->mmu_lock); pgt = mmu->pgt; + if (kvm_is_realm(kvm) && + (kvm_realm_state(kvm) != REALM_STATE_DEAD && + kvm_realm_state(kvm) != REALM_STATE_NONE)) { + unmap_stage2_range(mmu, 0, (~0ULL) & PAGE_MASK, false); + write_unlock(&kvm->mmu_lock); + kvm_realm_destroy_rtts(kvm, pgt->ia_bits); + + /* + * The physical PGD pages are delegated to the RMM, so cannot + * be freed at this point. This function will be called again + * from kvm_destroy_realm() after the physical pages have been + * returned at which point the memory can be freed. 
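+ *
+ * Rough ordering sketch (descriptive only, following this function and
+ * kvm_destroy_realm()):
+ *
+ *   kvm_free_stage2_pgd()        unmap + kvm_realm_destroy_rtts(),
+ *                                pgt left in place
+ *   kvm_destroy_realm()          rmi_realm_destroy(), PGD granules
+ *                                undelegated, state -> REALM_STATE_DEAD
+ *     kvm_free_stage2_pgd()      realm branch skipped (state is DEAD),
+ *                                pgt finally freed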
+ */ + return; + } if (pgt) { mmu->pgd_phys = 0; mmu->pgt = NULL; @@ -1081,6 +1119,10 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, if (is_protected_kvm_enabled()) return -EPERM; + /* We don't support mapping special pages into a Realm */ + if (kvm_is_realm(kvm)) + return -EPERM; + size += offset_in_page(guest_ipa); guest_ipa &= PAGE_MASK; @@ -1397,6 +1439,25 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma) return vma->vm_flags & VM_MTE_ALLOWED; } +static int realm_map_ipa(struct kvm *kvm, phys_addr_t ipa, + kvm_pfn_t pfn, unsigned long map_size, + enum kvm_pgtable_prot prot, + struct kvm_mmu_memory_cache *memcache) +{ + struct realm *realm = &kvm->arch.realm; + + if (WARN_ON(!(prot & KVM_PGTABLE_PROT_W))) + return -EFAULT; + + ipa = ALIGN_DOWN(ipa, PAGE_SIZE); + + if (!kvm_realm_is_private_address(realm, ipa)) + return realm_map_non_secure(realm, ipa, pfn, map_size, + memcache); + + return realm_map_protected(realm, ipa, pfn, map_size, memcache); +} + static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, unsigned long fault_status) @@ -1420,12 +1481,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); write_fault = kvm_is_write_fault(vcpu); -#ifdef CONFIG_HISI_VIRTCCA_CODA - if (vcpu_is_tec(vcpu)) { + + /* + * Realms cannot map protected pages read-only + * FIXME: It should be possible to map unprotected pages read-only + */ + if (vcpu_is_rec(vcpu)) write_fault = true; - prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W; - } -#endif + exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); vcpu->stat.mabt_exit_stat++; @@ -1468,6 +1531,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (logging_active) { force_pte = true; vma_shift = PAGE_SHIFT; + } else if (vcpu_is_rec(vcpu)) { + // Force PTE level mappings for realms + force_pte = true; + vma_shift = PAGE_SHIFT; } else { vma_shift = get_vma_page_shift(vma, hva); } @@ -1500,7 +1567,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) fault_ipa &= ~(vma_pagesize - 1); - gfn = fault_ipa >> PAGE_SHIFT; + gfn = kvm_gpa_from_fault(kvm, fault_ipa) >> PAGE_SHIFT; mte_allowed = kvm_vma_mte_allowed(vma); /* Don't use the VMA after the unlock -- it may have vanished */ @@ -1599,6 +1666,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) prot |= KVM_PGTABLE_PROT_X; + /* + * For now we shouldn't be hitting protected addresses because they are + * handled in private_memslot_fault(). In the future this check may be + * relaxed to support e.g. protected devices. + */ + if (vcpu_is_rec(vcpu) && + kvm_gpa_from_fault(kvm, fault_ipa) == fault_ipa) + return -EINVAL; + /* * Under the premise of getting a FSC_PERM fault, we just need to relax * permissions only if vma_pagesize equals fault_granule. 
Otherwise, @@ -1606,6 +1682,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, */ if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule) ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); + else if (kvm_is_realm(kvm)) + ret = realm_map_ipa(kvm, fault_ipa, pfn, vma_pagesize, + prot, memcache); else ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, __pfn_to_phys(pfn), prot, @@ -1729,7 +1808,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) idx = srcu_read_lock(&vcpu->kvm->srcu); - gfn = fault_ipa >> PAGE_SHIFT; + gfn = kvm_gpa_from_fault(vcpu->kvm, fault_ipa) >> PAGE_SHIFT; memslot = gfn_to_memslot(vcpu->kvm, gfn); hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); write_fault = kvm_is_write_fault(vcpu); @@ -1774,7 +1853,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) * of the page size. */ fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); - ret = io_mem_abort(vcpu, fault_ipa); + ret = io_mem_abort(vcpu, kvm_gpa_from_fault(vcpu->kvm, fault_ipa)); goto out_unlock; } @@ -1813,7 +1892,8 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT, (range->end - range->start) << PAGE_SHIFT, - range->may_block); + range->may_block, + !(range->attr_filter & KVM_FILTER_PRIVATE)); return false; } @@ -1859,6 +1939,10 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (!kvm->arch.mmu.pgt) return false; + /* We don't support aging for Realms */ + if (kvm_is_realm(kvm)) + return true; + return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, size, true); @@ -1871,6 +1955,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) if (!kvm->arch.mmu.pgt) return false; + /* We don't support aging for Realms */ + if (kvm_is_realm(kvm)) + return true; + return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, size, false); @@ -2123,7 +2211,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, phys_addr_t size = slot->npages << PAGE_SHIFT; write_lock(&kvm->mmu_lock); - unmap_stage2_range(&kvm->arch.mmu, gpa, size); + unmap_stage2_range(&kvm->arch.mmu, gpa, size, true); write_unlock(&kvm->mmu_lock); } diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6469a7c51df3fdc74f0cee74cc3d2ef4ea8b7fb8..4b0403d52bfec3e70471e9e43ad9159adf19e5b6 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -15,7 +15,6 @@ #include #include #include -#include #define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0) @@ -341,14 +340,9 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) { u64 reg = 0; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; + if (vcpu_is_rec(vcpu)) + return vcpu->arch.rec.run->exit.pmu_ovf_status; - reg = run->tec_exit.pmu_ovf_status; - return reg; - } -#endif if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) { reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1); diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index b544418b68edf738d89d22523cf864a661c5e035..750b3899d462ef4e3fc429517a5d8f3c3eebc1c6 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -12,7 +12,6 @@ #include #include -#include #include #include @@ -80,10 +79,6 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) return PSCI_RET_INVALID_PARAMS; spin_lock(&vcpu->arch.mp_state_lock); -#ifdef CONFIG_HISI_VIRTCCA_HOST 
- if (vcpu_is_tec(vcpu)) - cvm_psci_complete(source_vcpu, vcpu); -#endif if (!kvm_arm_vcpu_stopped(vcpu)) { if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) ret = PSCI_RET_ALREADY_ON; @@ -108,6 +103,12 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) reset_state->reset = true; kvm_make_request(KVM_REQ_VCPU_RESET, vcpu); + /* + * Make sure we issue PSCI_COMPLETE before the VCPU can be + * scheduled. + */ + if (vcpu_is_rec(vcpu)) + realm_psci_complete(source_vcpu, vcpu, PSCI_RET_SUCCESS); /* * Make sure the reset request is observed if the RUNNABLE mp_state is @@ -120,6 +121,11 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) out_unlock: spin_unlock(&vcpu->arch.mp_state_lock); + if (vcpu_is_rec(vcpu) && ret != PSCI_RET_SUCCESS) { + realm_psci_complete(source_vcpu, vcpu, + ret == PSCI_RET_ALREADY_ON ? + PSCI_RET_SUCCESS : PSCI_RET_DENIED); + } return ret; } @@ -146,10 +152,26 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) /* Ignore other bits of target affinity */ target_affinity &= target_affinity_mask; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level); -#endif + + if (vcpu_is_rec(vcpu)) { + struct kvm_vcpu *target_vcpu; + + /* RMM supports only zero affinity level */ + if (lowest_affinity_level != 0) + return PSCI_RET_INVALID_PARAMS; + + target_vcpu = kvm_mpidr_to_vcpu(kvm, target_affinity); + if (!target_vcpu) + return PSCI_RET_INVALID_PARAMS; + + /* + * Provide the references of the source and target RECs to the + * RMM so that the RMM can complete the PSCI request. + */ + realm_psci_complete(vcpu, target_vcpu, PSCI_RET_SUCCESS); + return PSCI_RET_SUCCESS; + } + /* * If one or more VCPU matching target affinity are running * then ON else OFF diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 0de1094d4e1978c3d853a6bd90880e2e4d7ea789..8c507aac3df921af4b46101ed68629d5a58308d1 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -29,7 +29,6 @@ #include #include #include -#include /* Maximum phys_shift supported for any VM on this host */ static u32 __ro_after_init kvm_ipa_limit; @@ -46,7 +45,7 @@ static u32 __ro_after_init kvm_ipa_limit; #define VCPU_RESET_PSTATE_SVC (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \ PSR_AA32_I_BIT | PSR_AA32_F_BIT) -unsigned int __ro_after_init kvm_sve_max_vl; +static unsigned int __ro_after_init kvm_sve_max_vl; int __init kvm_arm_init_sve(void) { @@ -74,12 +73,20 @@ int __init kvm_arm_init_sve(void) return 0; } +unsigned int kvm_sve_get_max_vl(struct kvm *kvm) +{ + if (kvm_is_realm(kvm)) + return kvm_realm_sve_max_vl(); + else + return kvm_sve_max_vl; +} + static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) { if (!system_supports_sve()) return -EINVAL; - vcpu->arch.sve_max_vl = kvm_sve_max_vl; + vcpu->arch.sve_max_vl = kvm_sve_get_max_vl(vcpu->kvm); /* * Userspace can still customize the vector lengths by writing @@ -140,12 +147,11 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) return -EPERM; return kvm_vcpu_finalize_sve(vcpu); -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_ARM_VCPU_TEC: - if (!kvm_is_virtcca_cvm(vcpu->kvm)) + case KVM_ARM_VCPU_REC: + if (!kvm_is_realm(vcpu->kvm) || !vcpu_is_rec(vcpu)) return -EINVAL; - return kvm_finalize_vcpu_tec(vcpu); -#endif + + return kvm_create_rec(vcpu); } return -EINVAL; @@ -156,6 +162,11 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu)) return 
false; + if (kvm_is_realm(vcpu->kvm) && + !(vcpu_is_rec(vcpu) && kvm_arm_rec_finalized(vcpu) && + READ_ONCE(vcpu->kvm->arch.realm.state) == REALM_STATE_ACTIVE)) + return false; + return true; } @@ -170,10 +181,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu)); kfree(sve_state); kfree(vcpu->arch.ccsidr); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - kvm_destroy_tec(vcpu); -#endif + kvm_destroy_rec(vcpu); #ifdef CONFIG_ARM64_HDBSS if (vcpu->arch.hdbss.br_el2) { diff --git a/arch/arm64/kvm/rme-exit.c b/arch/arm64/kvm/rme-exit.c new file mode 100644 index 0000000000000000000000000000000000000000..68533ad7e057083fc94f9e96f244a1aecd2a873e --- /dev/null +++ b/arch/arm64/kvm/rme-exit.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 ARM Ltd. + */ + +#include +#include +#include + +#include +#include +#include +#include + +typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu); + +static int rec_exit_reason_notimpl(struct kvm_vcpu *vcpu) +{ + struct realm_rec *rec = &vcpu->arch.rec; + + vcpu_err(vcpu, "Unhandled exit reason from realm (ESR: %#llx)\n", + rec->run->exit.esr); + return -ENXIO; +} + +static int rec_exit_sync_dabt(struct kvm_vcpu *vcpu) +{ + struct realm_rec *rec = &vcpu->arch.rec; + + /* + * In the case of a write, copy over gprs[0] to the target GPR, + * preparing to handle MMIO write fault. The content to be written has + * been saved to gprs[0] by the RMM (even if another register was used + * by the guest). In the case of normal memory access this is redundant + * (the guest will replay the instruction), but the overhead is + * minimal. + */ + if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) + vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), + rec->run->exit.gprs[0]); + + return kvm_handle_guest_abort(vcpu); +} + +static int rec_exit_sync_iabt(struct kvm_vcpu *vcpu) +{ + struct realm_rec *rec = &vcpu->arch.rec; + + vcpu_err(vcpu, "Unhandled instruction abort (ESR: %#llx).\n", + rec->run->exit.esr); + return -ENXIO; +} + +static int rec_exit_sys_reg(struct kvm_vcpu *vcpu) +{ + struct realm_rec *rec = &vcpu->arch.rec; + unsigned long esr = kvm_vcpu_get_esr(vcpu); + int rt = kvm_vcpu_sys_get_rt(vcpu); + bool is_write = !(esr & 1); + int ret; + + if (is_write) + vcpu_set_reg(vcpu, rt, rec->run->exit.gprs[0]); + + ret = kvm_handle_sys_reg(vcpu); + if (ret > 0 && !is_write) + rec->run->enter.gprs[0] = vcpu_get_reg(vcpu, rt); + + return ret; +} + +static exit_handler_fn rec_exit_handlers[] = { + [0 ... 
ESR_ELx_EC_MAX] = rec_exit_reason_notimpl, + [ESR_ELx_EC_SYS64] = rec_exit_sys_reg, + [ESR_ELx_EC_DABT_LOW] = rec_exit_sync_dabt, + [ESR_ELx_EC_IABT_LOW] = rec_exit_sync_iabt +}; + +static int rec_exit_psci(struct kvm_vcpu *vcpu) +{ + struct realm_rec *rec = &vcpu->arch.rec; + int i; + + for (i = 0; i < REC_RUN_GPRS; i++) + vcpu_set_reg(vcpu, i, rec->run->exit.gprs[i]); + + return kvm_smccc_call_handler(vcpu); +} + +static int rec_exit_ripas_change(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + struct realm *realm = &kvm->arch.realm; + struct realm_rec *rec = &vcpu->arch.rec; + unsigned long base = rec->run->exit.ripas_base; + unsigned long top = rec->run->exit.ripas_top; + unsigned long ripas = rec->run->exit.ripas_value; + + if (!kvm_realm_is_private_address(realm, base) || + !kvm_realm_is_private_address(realm, top - 1)) { + vcpu_err(vcpu, "Invalid RIPAS_CHANGE for %#lx - %#lx, ripas: %#lx\n", + base, top, ripas); + return -EINVAL; + } + + return 1; +} + +static int rec_exit_host_call(struct kvm_vcpu *vcpu) +{ + int i; + struct realm_rec *rec = &vcpu->arch.rec; + + vcpu->stat.hvc_exit_stat++; + + for (i = 0; i < REC_RUN_GPRS; i++) + vcpu_set_reg(vcpu, i, rec->run->exit.gprs[i]); + + return kvm_smccc_call_handler(vcpu); +} + +static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu) +{ + struct realm_rec *rec = &vcpu->arch.rec; + + __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = rec->run->exit.cntv_ctl; + __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = rec->run->exit.cntv_cval; + __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = rec->run->exit.cntp_ctl; + __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = rec->run->exit.cntp_cval; + + kvm_realm_timers_update(vcpu); +} + +/* + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. + */ +int handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_ret) +{ + struct realm_rec *rec = &vcpu->arch.rec; + u8 esr_ec = ESR_ELx_EC(rec->run->exit.esr); + unsigned long status, index; + + status = RMI_RETURN_STATUS(rec_run_ret); + index = RMI_RETURN_INDEX(rec_run_ret); + + /* + * If a PSCI_SYSTEM_OFF request raced with a vcpu executing, we might + * see the following status code and index indicating an attempt to run + * a REC when the RD state is SYSTEM_OFF. In this case, we just need to + * return to user space which can deal with the system event or will try + * to run the KVM VCPU again, at which point we will no longer attempt + * to enter the Realm because we will have a sleep request pending on + * the VCPU as a result of KVM's PSCI handling. 
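+ *
+ * Sketched timeline of that race (illustrative, not normative):
+ *
+ *   vcpu0: Realm requests PSCI_SYSTEM_OFF, KVM queues the system event
+ *          and flags the other vCPUs to sleep
+ *   vcpu1: rmi_rec_enter() fails with (RMI_ERROR_REALM, index 1)
+ *   vcpu1: exits to userspace with KVM_EXIT_UNKNOWN (handled below)
+ *   VMM:   processes the shutdown, or re-runs vcpu1 which then blocks
+ *          on the pending sleep request instead of re-entering the Realm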
+ */ + if (status == RMI_ERROR_REALM && index == 1) { + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + return 0; + } + + if (rec_run_ret) + return -ENXIO; + + vcpu->arch.fault.esr_el2 = rec->run->exit.esr; + vcpu->arch.fault.far_el2 = rec->run->exit.far; + vcpu->arch.fault.hpfar_el2 = rec->run->exit.hpfar; + + update_arch_timer_irq_lines(vcpu); + + /* Reset the emulation flags for the next run of the REC */ + rec->run->enter.flags = 0; + + switch (rec->run->exit.exit_reason) { + case RMI_EXIT_SYNC: + return rec_exit_handlers[esr_ec](vcpu); + case RMI_EXIT_IRQ: + case RMI_EXIT_FIQ: + return 1; + case RMI_EXIT_PSCI: + return rec_exit_psci(vcpu); + case RMI_EXIT_RIPAS_CHANGE: + return rec_exit_ripas_change(vcpu); + case RMI_EXIT_HOST_CALL: + return rec_exit_host_call(vcpu); + } + + kvm_pr_unimpl("Unsupported exit reason: %u\n", + rec->run->exit.exit_reason); + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + return 0; +} diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c new file mode 100644 index 0000000000000000000000000000000000000000..ce37ca7d4defc924bf5ce591698a4f17b13629a4 --- /dev/null +++ b/arch/arm64/kvm/rme.c @@ -0,0 +1,1743 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 ARM Ltd. + */ + +#include + +#include +#include +#include +#include + +#include + +static unsigned long rmm_feat_reg0; + +#define RMM_PAGE_SHIFT 12 +#define RMM_PAGE_SIZE BIT(RMM_PAGE_SHIFT) + +#define RMM_RTT_BLOCK_LEVEL 2 +#define RMM_RTT_MAX_LEVEL 3 + +/* See ARM64_HW_PGTABLE_LEVEL_SHIFT() */ +#define RMM_RTT_LEVEL_SHIFT(l) \ + ((RMM_PAGE_SHIFT - 3) * (4 - (l)) + 3) +#define RMM_L2_BLOCK_SIZE BIT(RMM_RTT_LEVEL_SHIFT(2)) + +static inline unsigned long rme_rtt_level_mapsize(int level) +{ + if (WARN_ON(level > RMM_RTT_MAX_LEVEL)) + return RMM_PAGE_SIZE; + + return (1UL << RMM_RTT_LEVEL_SHIFT(level)); +} + +static bool rme_has_feature(unsigned long feature) +{ + return !!u64_get_bits(rmm_feat_reg0, feature); +} + +bool kvm_rme_supports_sve(void) +{ + return rme_has_feature(RMI_FEATURE_REGISTER_0_SVE_EN); +} + +static int rmi_check_version(void) +{ + struct arm_smccc_res res; + unsigned short version_major, version_minor; + unsigned long host_version = RMI_ABI_VERSION(RMI_ABI_MAJOR_VERSION, + RMI_ABI_MINOR_VERSION); + + arm_smccc_1_1_invoke(SMC_RMI_VERSION, host_version, &res); + + if (res.a0 == SMCCC_RET_NOT_SUPPORTED) + return -ENXIO; + + version_major = RMI_ABI_VERSION_GET_MAJOR(res.a1); + version_minor = RMI_ABI_VERSION_GET_MINOR(res.a1); + + if (res.a0 != RMI_SUCCESS) { + unsigned short high_version_major, high_version_minor; + + high_version_major = RMI_ABI_VERSION_GET_MAJOR(res.a2); + high_version_minor = RMI_ABI_VERSION_GET_MINOR(res.a2); + + kvm_err("Unsupported RMI ABI (v%d.%d - v%d.%d) we want v%d.%d\n", + version_major, version_minor, + high_version_major, high_version_minor, + RMI_ABI_MAJOR_VERSION, + RMI_ABI_MINOR_VERSION); + return -ENXIO; + } + + kvm_info("RMI ABI version %d.%d\n", version_major, version_minor); + + return 0; +} + +u32 kvm_realm_ipa_limit(void) +{ + return u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_S2SZ); +} + +u32 kvm_realm_vgic_nr_lr(void) +{ + return u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_GICV3_NUM_LRS); +} + +u32 kvm_realm_get_num_brps(void) +{ + return u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_NUM_BPS); +} + +u32 kvm_realm_get_num_wrps(void) +{ + return u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_NUM_WPS); +} + +unsigned int kvm_realm_sve_max_vl(void) +{ + return sve_vl_from_vq(u64_get_bits(rmm_feat_reg0, + 
RMI_FEATURE_REGISTER_0_SVE_VL) + 1); +} + +u64 kvm_realm_reset_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val) +{ + u32 bps = u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_NUM_BPS); + u32 wps = u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_NUM_WPS); + u32 ctx_cmps; + + if (!kvm_is_realm(vcpu->kvm)) + return val; + + /* Ensure CTX_CMPs is still valid */ + ctx_cmps = FIELD_GET(ID_AA64DFR0_EL1_CTX_CMPs, val); + ctx_cmps = min(bps, ctx_cmps); + + val &= ~(ID_AA64DFR0_EL1_BRPs_MASK | ID_AA64DFR0_EL1_WRPs_MASK | + ID_AA64DFR0_EL1_CTX_CMPs); + val |= FIELD_PREP(ID_AA64DFR0_EL1_BRPs_MASK, bps) | + FIELD_PREP(ID_AA64DFR0_EL1_WRPs_MASK, wps) | + FIELD_PREP(ID_AA64DFR0_EL1_CTX_CMPs, ctx_cmps); + + return val; +} + +static int get_start_level(struct realm *realm) +{ + return 4 - ((realm->ia_bits - 8) / (RMM_PAGE_SHIFT - 3)); +} + +static int find_map_level(struct realm *realm, + unsigned long start, + unsigned long end) +{ + int level = RMM_RTT_MAX_LEVEL; + + while (level > get_start_level(realm)) { + unsigned long map_size = rme_rtt_level_mapsize(level - 1); + + if (!IS_ALIGNED(start, map_size) || + (start + map_size) > end) + break; + + level--; + } + + return level; +} + +static phys_addr_t alloc_delegated_granule(struct kvm_mmu_memory_cache *mc) +{ + phys_addr_t phys; + void *virt; + + if (mc) + virt = kvm_mmu_memory_cache_alloc(mc); + else + virt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); + + if (!virt) + return PHYS_ADDR_MAX; + + phys = virt_to_phys(virt); + + if (rmi_granule_delegate(phys)) { + free_page((unsigned long)virt); + + return PHYS_ADDR_MAX; + } + + kvm_account_pgtable_pages(virt, 1); + + return phys; +} + +static void free_delegated_granule(phys_addr_t phys) +{ + if (WARN_ON(rmi_granule_undelegate(phys))) { + /* Undelegate failed: leak the page */ + return; + } + + kvm_account_pgtable_pages(phys_to_virt(phys), -1); + + free_page((unsigned long)phys_to_virt(phys)); +} + +int realm_psci_complete(struct kvm_vcpu *source, struct kvm_vcpu *target, + unsigned long status) +{ + int ret; + + ret = rmi_psci_complete(virt_to_phys(source->arch.rec.rec_page), + virt_to_phys(target->arch.rec.rec_page), + status); + if (ret) + return -EINVAL; + + return 0; +} + +static int realm_rtt_create(struct realm *realm, + unsigned long addr, + int level, + phys_addr_t phys) +{ + addr = ALIGN_DOWN(addr, rme_rtt_level_mapsize(level - 1)); + return rmi_rtt_create(virt_to_phys(realm->rd), phys, addr, level); +} + +static int realm_rtt_fold(struct realm *realm, + unsigned long addr, + int level, + phys_addr_t *rtt_granule) +{ + unsigned long out_rtt; + int ret; + + ret = rmi_rtt_fold(virt_to_phys(realm->rd), addr, level, &out_rtt); + + if (RMI_RETURN_STATUS(ret) == RMI_SUCCESS && rtt_granule) + *rtt_granule = out_rtt; + + return ret; +} + +static int realm_destroy_private_granule(struct realm *realm, + unsigned long ipa, + unsigned long *next_addr, + phys_addr_t *out_rtt) +{ + unsigned long rd = virt_to_phys(realm->rd); + unsigned long rtt_addr; + phys_addr_t rtt; + int ret; + +retry: + ret = rmi_data_destroy(rd, ipa, &rtt_addr, next_addr); + if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) { + if (*next_addr > ipa) + return 0; /* UNASSIGNED */ + rtt = alloc_delegated_granule(NULL); + if (WARN_ON(rtt == PHYS_ADDR_MAX)) + return -ENOMEM; + /* + * ASSIGNED - ipa is mapped as a block, so split. 
The index + * from the return code should be 2 otherwise it appears + * there's a huge page bigger than KVM currently supports + */ + WARN_ON(RMI_RETURN_INDEX(ret) != 2); + ret = realm_rtt_create(realm, ipa, 3, rtt); + if (WARN_ON(ret)) { + free_delegated_granule(rtt); + return -ENXIO; + } + goto retry; + } else if (WARN_ON(ret)) { + return -ENXIO; + } + + ret = rmi_granule_undelegate(rtt_addr); + if (WARN_ON(ret)) + return -ENXIO; + + *out_rtt = rtt_addr; + + return 0; +} + +static int realm_unmap_private_page(struct realm *realm, + unsigned long ipa, + unsigned long *next_addr) +{ + unsigned long end = ALIGN(ipa + 1, PAGE_SIZE); + unsigned long addr; + phys_addr_t out_rtt = PHYS_ADDR_MAX; + int ret; + + for (addr = ipa; addr < end; addr = *next_addr) { + ret = realm_destroy_private_granule(realm, addr, next_addr, + &out_rtt); + if (ret) + return ret; + } + + if (out_rtt != PHYS_ADDR_MAX) + free_page((unsigned long)phys_to_virt(out_rtt)); + + return 0; +} + +static void realm_unmap_shared_range(struct kvm *kvm, + int level, + unsigned long start, + unsigned long end) +{ + struct realm *realm = &kvm->arch.realm; + unsigned long rd = virt_to_phys(realm->rd); + ssize_t map_size = rme_rtt_level_mapsize(level); + unsigned long next_addr, addr; + unsigned long shared_bit = BIT(realm->ia_bits - 1); + + if (WARN_ON(level > RMM_RTT_MAX_LEVEL)) + return; + + start |= shared_bit; + end |= shared_bit; + + for (addr = start; addr < end; addr = next_addr) { + unsigned long align_addr = ALIGN(addr, map_size); + int ret; + + next_addr = ALIGN(addr + 1, map_size); + + if (align_addr != addr || next_addr > end) { + /* Need to recurse deeper */ + if (addr < align_addr) + next_addr = align_addr; + realm_unmap_shared_range(kvm, level + 1, addr, + min(next_addr, end)); + continue; + } + + ret = rmi_rtt_unmap_unprotected(rd, addr, level, &next_addr); + switch (RMI_RETURN_STATUS(ret)) { + case RMI_SUCCESS: + break; + case RMI_ERROR_RTT: + if (next_addr == addr) { + /* + * There's a mapping here, but it's not a block + * mapping, so reset next_addr to the next block + * boundary and recurse to clear out the pages + * one level deeper. + */ + next_addr = ALIGN(addr + 1, map_size); + realm_unmap_shared_range(kvm, level + 1, addr, + next_addr); + } + break; + default: + WARN_ON(1); + return; + } + + cond_resched_rwlock_write(&kvm->mmu_lock); + } +} + +static int realm_init_sve_param(struct kvm *kvm, struct realm_params *params) +{ + int ret = 0; + unsigned long i; + struct kvm_vcpu *vcpu; + int vl, last_vl = -1; + + /* + * Get the preferred SVE configuration, set by userspace with the + * KVM_ARM_VCPU_SVE feature and KVM_REG_ARM64_SVE_VLS pseudo-register. 
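+ *
+ * Worked example (numbers only, following the helpers used below): if
+ * every SVE vCPU was finalized with a 512-bit (64-byte) vector length,
+ * sve_vq_from_vl(64) == 4 and the Realm parameter becomes sve_vl = 3,
+ * with RMI_REALM_PARAM_FLAG_SVE set. A mix of vector lengths, or an
+ * unfinalized SVE vCPU, causes this function to fail with -EINVAL.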
+ */ + kvm_for_each_vcpu(i, vcpu, kvm) { + mutex_lock(&vcpu->mutex); + if (vcpu_has_sve(vcpu)) { + if (!kvm_arm_vcpu_sve_finalized(vcpu)) + ret = -EINVAL; + vl = vcpu->arch.sve_max_vl; + } else { + vl = 0; + } + mutex_unlock(&vcpu->mutex); + if (ret) + return ret; + + /* We need all vCPUs to have the same SVE config */ + if (last_vl >= 0 && last_vl != vl) + return -EINVAL; + + last_vl = vl; + } + + if (last_vl > 0) { + params->sve_vl = sve_vq_from_vl(last_vl) - 1; + params->flags |= RMI_REALM_PARAM_FLAG_SVE; + } + return 0; +} + +/* Calculate the number of s2 root rtts needed */ +static int realm_num_root_rtts(struct realm *realm) +{ + unsigned int ipa_bits = realm->ia_bits; + unsigned int levels = 3 - get_start_level(realm); + unsigned int sl_ipa_bits = (levels + 1) * (RMM_PAGE_SHIFT - 3) + + RMM_PAGE_SHIFT; + + if (sl_ipa_bits >= ipa_bits) + return 1; + + return 1 << (ipa_bits - sl_ipa_bits); +} + +static int realm_create_rd(struct kvm *kvm) +{ + struct realm *realm = &kvm->arch.realm; + struct realm_params *params = realm->params; + void *rd = NULL; + phys_addr_t rd_phys, params_phys; + size_t pgd_size = kvm_pgtable_stage2_pgd_size(kvm->arch.vtcr); + int i, r; + int rtt_num_start; + + realm->ia_bits = VTCR_EL2_IPA(kvm->arch.vtcr); + rtt_num_start = realm_num_root_rtts(realm); + + if (WARN_ON(realm->rd || !realm->params)) + return -EEXIST; + + if (pgd_size / RMM_PAGE_SIZE < rtt_num_start) + return -EINVAL; + + rd = (void *)__get_free_page(GFP_KERNEL); + if (!rd) + return -ENOMEM; + + rd_phys = virt_to_phys(rd); + if (rmi_granule_delegate(rd_phys)) { + r = -ENXIO; + goto free_rd; + } + + for (i = 0; i < pgd_size; i += RMM_PAGE_SIZE) { + phys_addr_t pgd_phys = kvm->arch.mmu.pgd_phys + i; + + if (rmi_granule_delegate(pgd_phys)) { + r = -ENXIO; + goto out_undelegate_tables; + } + } + + params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr); + params->rtt_level_start = get_start_level(realm); + params->rtt_num_start = rtt_num_start; + params->rtt_base = kvm->arch.mmu.pgd_phys; + params->vmid = realm->vmid; + + r = realm_init_sve_param(kvm, params); + if (r) + goto out_undelegate_tables; + + params_phys = virt_to_phys(params); + + if (rmi_realm_create(rd_phys, params_phys)) { + r = -ENXIO; + goto out_undelegate_tables; + } + + if (WARN_ON(rmi_rec_aux_count(rd_phys, &realm->num_aux))) { + WARN_ON(rmi_realm_destroy(rd_phys)); + goto out_undelegate_tables; + } + + realm->rd = rd; + + return 0; + +out_undelegate_tables: + while (i > 0) { + i -= RMM_PAGE_SIZE; + + phys_addr_t pgd_phys = kvm->arch.mmu.pgd_phys + i; + + if (WARN_ON(rmi_granule_undelegate(pgd_phys))) { + /* Leak the pages if they cannot be returned */ + kvm->arch.mmu.pgt = NULL; + break; + } + } + if (WARN_ON(rmi_granule_undelegate(rd_phys))) { + /* Leak the page if it isn't returned */ + return r; + } +free_rd: + free_page((unsigned long)rd); + return r; +} + +static int realm_rtt_destroy(struct realm *realm, unsigned long addr, + int level, phys_addr_t *rtt_granule, + unsigned long *next_addr) +{ + unsigned long out_rtt; + int ret; + + ret = rmi_rtt_destroy(virt_to_phys(realm->rd), addr, level, + &out_rtt, next_addr); + + *rtt_granule = out_rtt; + + return ret; +} + +static int realm_create_rtt_levels(struct realm *realm, + unsigned long ipa, + int level, + int max_level, + struct kvm_mmu_memory_cache *mc) +{ + if (level == max_level) + return 0; + + while (level++ < max_level) { + phys_addr_t rtt = alloc_delegated_granule(mc); + int ret; + + if (rtt == PHYS_ADDR_MAX) + return -ENOMEM; + + ret = realm_rtt_create(realm, ipa, level, rtt); + + if 
(RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT && + RMI_RETURN_INDEX(ret) == level - 1) { + /* The RTT already exists, continue */ + continue; + } + if (ret) { + WARN(1, "Failed to create RTT at level %d: %d\n", + level, ret); + free_delegated_granule(rtt); + return -ENXIO; + } + } + + return 0; +} + +static int realm_tear_down_rtt_level(struct realm *realm, int level, + unsigned long start, unsigned long end) +{ + ssize_t map_size; + unsigned long addr, next_addr; + + if (WARN_ON(level > RMM_RTT_MAX_LEVEL)) + return -EINVAL; + + map_size = rme_rtt_level_mapsize(level - 1); + + for (addr = start; addr < end; addr = next_addr) { + phys_addr_t rtt_granule; + int ret; + unsigned long align_addr = ALIGN(addr, map_size); + + next_addr = ALIGN(addr + 1, map_size); + + if (next_addr > end || align_addr != addr) { + /* + * The target range is smaller than what this level + * covers, recurse deeper. + */ + ret = realm_tear_down_rtt_level(realm, + level + 1, + addr, + min(next_addr, end)); + if (ret) + return ret; + continue; + } + + ret = realm_rtt_destroy(realm, addr, level, + &rtt_granule, &next_addr); + + switch (RMI_RETURN_STATUS(ret)) { + case RMI_SUCCESS: + free_delegated_granule(rtt_granule); + break; + case RMI_ERROR_RTT: + if (next_addr > addr) { + /* Missing RTT, skip */ + break; + } + /* + * We tear down the RTT range for the full IPA + * space, after everything is unmapped. Also we + * descend down only if we cannot tear down a + * top level RTT. Thus RMM must be able to walk + * to the requested level. e.g., a block mapping + * exists at L1 or L2. + */ + if (WARN_ON(RMI_RETURN_INDEX(ret) != level)) + return -EBUSY; + if (WARN_ON(level == RMM_RTT_MAX_LEVEL)) + return -EBUSY; + + /* + * The table has active entries in it, recurse deeper + * and tear down the RTTs. + */ + next_addr = ALIGN(addr + 1, map_size); + ret = realm_tear_down_rtt_level(realm, + level + 1, + addr, + next_addr); + if (ret) + return ret; + /* + * Now that the child RTTs are destroyed, + * retry at this level. + */ + next_addr = addr; + break; + default: + WARN_ON(1); + return -ENXIO; + } + } + + return 0; +} + +static int realm_tear_down_rtt_range(struct realm *realm, + unsigned long start, unsigned long end) +{ + return realm_tear_down_rtt_level(realm, get_start_level(realm) + 1, + start, end); +} + +/* + * Returns 0 on successful fold, a negative value on error, a positive value if + * we were not able to fold all tables at this level. 
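+ *
+ * Callers treat that accordingly: the recursive path below retries the
+ * current level only when the deeper fold returns 0, while
+ * realm_unmap_private_range() ignores the value and treats folding as a
+ * best-effort reclaim of RTT granules.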
+ */ +static int realm_fold_rtt_level(struct realm *realm, int level, + unsigned long start, unsigned long end) +{ + int not_folded = 0; + ssize_t map_size; + unsigned long addr, next_addr; + + if (WARN_ON(level > RMM_RTT_MAX_LEVEL)) + return -EINVAL; + + map_size = rme_rtt_level_mapsize(level - 1); + + for (addr = start; addr < end; addr = next_addr) { + phys_addr_t rtt_granule; + int ret; + unsigned long align_addr = ALIGN(addr, map_size); + + next_addr = ALIGN(addr + 1, map_size); + + ret = realm_rtt_fold(realm, align_addr, level, &rtt_granule); + + switch (RMI_RETURN_STATUS(ret)) { + case RMI_SUCCESS: + free_delegated_granule(rtt_granule); + break; + case RMI_ERROR_RTT: + if (level == RMM_RTT_MAX_LEVEL || + RMI_RETURN_INDEX(ret) < level) { + not_folded++; + break; + } + /* Recurse a level deeper */ + ret = realm_fold_rtt_level(realm, + level + 1, + addr, + next_addr); + if (ret < 0) + return ret; + else if (ret == 0) + /* Try again at this level */ + next_addr = addr; + break; + default: + WARN_ON(1); + return -ENXIO; + } + } + + return not_folded; +} + +void kvm_realm_destroy_rtts(struct kvm *kvm, u32 ia_bits) +{ + struct realm *realm = &kvm->arch.realm; + + WARN_ON(realm_tear_down_rtt_range(realm, 0, (1UL << ia_bits))); +} + +static void realm_unmap_private_range(struct kvm *kvm, + unsigned long start, + unsigned long end) +{ + struct realm *realm = &kvm->arch.realm; + unsigned long next_addr, addr; + int ret; + + for (addr = start; addr < end; addr = next_addr) { + ret = realm_unmap_private_page(realm, addr, &next_addr); + + if (ret) + break; + } + + realm_fold_rtt_level(realm, get_start_level(realm) + 1, + start, end); +} + +void kvm_realm_unmap_range(struct kvm *kvm, unsigned long start, + unsigned long size, bool unmap_private) +{ + unsigned long end = start + size; + struct realm *realm = &kvm->arch.realm; + + end = min(BIT(realm->ia_bits - 1), end); + + if (realm->state == REALM_STATE_NONE) + return; + + realm_unmap_shared_range(kvm, find_map_level(realm, start, end), + start, end); + if (unmap_private) + realm_unmap_private_range(kvm, start, end); +} + +static int realm_create_protected_data_granule(struct realm *realm, + unsigned long ipa, + phys_addr_t dst_phys, + phys_addr_t src_phys, + unsigned long flags) +{ + phys_addr_t rd = virt_to_phys(realm->rd); + int ret; + + if (rmi_granule_delegate(dst_phys)) + return -ENXIO; + + ret = rmi_data_create(rd, dst_phys, ipa, src_phys, flags); + if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) { + /* Create missing RTTs and retry */ + int level = RMI_RETURN_INDEX(ret); + + WARN_ON(level == RMM_RTT_MAX_LEVEL); + + ret = realm_create_rtt_levels(realm, ipa, level, + RMM_RTT_MAX_LEVEL, NULL); + if (ret) + return -EIO; + + ret = rmi_data_create(rd, dst_phys, ipa, src_phys, flags); + } + if (ret) + return -EIO; + + return 0; +} + +static int realm_create_protected_data_page(struct realm *realm, + unsigned long ipa, + struct page *dst_page, + struct page *src_page, + unsigned long flags) +{ + unsigned long rd = virt_to_phys(realm->rd); + phys_addr_t dst_phys, src_phys; + bool undelegate_failed = false; + int ret, offset; + + dst_phys = page_to_phys(dst_page); + src_phys = page_to_phys(src_page); + copy_page((void *)src_phys, (void *)dst_phys); + + for (offset = 0; offset < PAGE_SIZE; offset += RMM_PAGE_SIZE) { + + ret = realm_create_protected_data_granule(realm, + ipa, + dst_phys, + src_phys, + flags); + if (ret) + goto err; + + ipa += RMM_PAGE_SIZE; + dst_phys += RMM_PAGE_SIZE; + src_phys += RMM_PAGE_SIZE; + } + + return 0; + +err: + if (ret == 
-EIO) { + /* current offset needs undelegating */ + if (WARN_ON(rmi_granule_undelegate(dst_phys))) + undelegate_failed = true; + } + while (offset > 0) { + ipa -= RMM_PAGE_SIZE; + offset -= RMM_PAGE_SIZE; + dst_phys -= RMM_PAGE_SIZE; + + rmi_data_destroy(rd, ipa, NULL, NULL); + + if (WARN_ON(rmi_granule_undelegate(dst_phys))) + undelegate_failed = true; + } + + if (undelegate_failed) { + /* + * A granule could not be undelegated, + * so the page has to be leaked + */ + get_page(dst_page); + } + + return -ENXIO; +} + +static int fold_rtt(struct realm *realm, unsigned long addr, int level) +{ + phys_addr_t rtt_addr; + int ret; + + ret = realm_rtt_fold(realm, addr, level, &rtt_addr); + if (ret) + return ret; + + free_delegated_granule(rtt_addr); + + return 0; +} + +int realm_map_protected(struct realm *realm, + unsigned long ipa, + kvm_pfn_t pfn, + unsigned long map_size, + struct kvm_mmu_memory_cache *memcache) +{ + phys_addr_t phys = __pfn_to_phys(pfn); + phys_addr_t rd = virt_to_phys(realm->rd); + unsigned long base_ipa = ipa; + unsigned long size; + int map_level; + int ret = 0; + + if (WARN_ON(!IS_ALIGNED(map_size, RMM_PAGE_SIZE))) + return -EINVAL; + + if (WARN_ON(!IS_ALIGNED(ipa, map_size))) + return -EINVAL; + + if (IS_ALIGNED(map_size, RMM_L2_BLOCK_SIZE)) + map_level = 2; + else + map_level = 3; + + if (map_level < RMM_RTT_MAX_LEVEL) { + /* + * A temporary RTT is needed during the map, precreate it, + * however if there is an error (e.g. missing parent tables) + * this will be handled below. + */ + realm_create_rtt_levels(realm, ipa, map_level, + RMM_RTT_MAX_LEVEL, memcache); + } + + for (size = 0; size < map_size; size += RMM_PAGE_SIZE) { + if (rmi_granule_delegate(phys)) { + /* + * It's likely we raced with another VCPU on the same + * fault. Assume the other VCPU has handled the fault + * and return to the guest. 
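+ *
+ * (rmi_granule_delegate() failing here most plausibly means the
+ * granule was already delegated by whichever vCPU won the race, so
+ * the loser returns 0 and lets the existing mapping stand.)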
+ */ + return 0; + } + + ret = rmi_data_create_unknown(rd, phys, ipa); + + if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) { + /* Create missing RTTs and retry */ + int level = RMI_RETURN_INDEX(ret); + + WARN_ON(level == RMM_RTT_MAX_LEVEL); + + ret = realm_create_rtt_levels(realm, ipa, level, + RMM_RTT_MAX_LEVEL, + memcache); + if (ret) + goto err_undelegate; + + ret = rmi_data_create_unknown(rd, phys, ipa); + } + + if (WARN_ON(ret)) + goto err_undelegate; + + phys += RMM_PAGE_SIZE; + ipa += RMM_PAGE_SIZE; + } + + if (map_size == RMM_L2_BLOCK_SIZE) { + ret = fold_rtt(realm, base_ipa, map_level + 1); + if (WARN_ON(ret)) + goto err; + } + + return 0; + +err_undelegate: + if (WARN_ON(rmi_granule_undelegate(phys))) { + /* Page can't be returned to NS world so is lost */ + get_page(phys_to_page(phys)); + } +err: + while (size > 0) { + unsigned long data, top; + + phys -= RMM_PAGE_SIZE; + size -= RMM_PAGE_SIZE; + ipa -= RMM_PAGE_SIZE; + + WARN_ON(rmi_data_destroy(rd, ipa, &data, &top)); + + if (WARN_ON(rmi_granule_undelegate(phys))) { + /* Page can't be returned to NS world so is lost */ + get_page(phys_to_page(phys)); + } + } + return -ENXIO; +} + +int realm_map_non_secure(struct realm *realm, + unsigned long ipa, + kvm_pfn_t pfn, + unsigned long size, + struct kvm_mmu_memory_cache *memcache) +{ + phys_addr_t rd = virt_to_phys(realm->rd); + phys_addr_t phys = __pfn_to_phys(pfn); + unsigned long offset; + int map_size, map_level; + int ret = 0; + + if (WARN_ON(!IS_ALIGNED(size, RMM_PAGE_SIZE))) + return -EINVAL; + + if (WARN_ON(!IS_ALIGNED(ipa, size))) + return -EINVAL; + + if (IS_ALIGNED(size, RMM_L2_BLOCK_SIZE)) { + map_level = 2; + map_size = RMM_L2_BLOCK_SIZE; + } else { + map_level = 3; + map_size = RMM_PAGE_SIZE; + } + + for (offset = 0; offset < size; offset += map_size) { + /* + * realm_map_ipa() enforces that the memory is writable, + * so for now we permit both read and write. + */ + unsigned long desc = phys | + PTE_S2_MEMATTR(MT_S2_FWB_NORMAL) | + (3 << 6); + ret = rmi_rtt_map_unprotected(rd, ipa, map_level, desc); + + if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) { + /* Create missing RTTs and retry */ + int level = RMI_RETURN_INDEX(ret); + + ret = realm_create_rtt_levels(realm, ipa, level, + map_level, memcache); + if (ret) + return -ENXIO; + + ret = rmi_rtt_map_unprotected(rd, ipa, map_level, desc); + } + /* + * RMI_ERROR_RTT can be reported for two reasons: either the + * RTT tables are not there, or there is an RTTE already + * present for the address. The call to + * realm_create_rtt_levels() above handles the first case, and + * in the second case this indicates that another thread has + * already populated the RTTE for us, so we can ignore the + * error and continue. 
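+ *
+ * (Unlike realm_map_protected() there is nothing to undelegate on
+ * failure, so any other error is simply reported as -ENXIO. As an
+ * aside, the hard-coded (3 << 6) in the descriptor above is the
+ * stage-2 S2AP field set to read+write, matching the note about
+ * realm_map_ipa() enforcing writable mappings.)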
+ */ + if (ret && RMI_RETURN_STATUS(ret) != RMI_ERROR_RTT) + return -ENXIO; + + ipa += map_size; + phys += map_size; + } + + return 0; +} + +static int populate_region(struct kvm *kvm, + phys_addr_t ipa_base, + phys_addr_t ipa_end, + unsigned long data_flags) +{ + struct realm *realm = &kvm->arch.realm; + struct kvm_memory_slot *memslot; + gfn_t base_gfn, end_gfn; + int idx; + phys_addr_t ipa = ipa_base; + struct page *tmp_page; + int ret = 0; + + base_gfn = gpa_to_gfn(ipa_base); + end_gfn = gpa_to_gfn(ipa_end); + + idx = srcu_read_lock(&kvm->srcu); + memslot = gfn_to_memslot(kvm, base_gfn); + if (!memslot) { + ret = -EFAULT; + goto out; + } + + /* We require the region to be contained within a single memslot */ + if (memslot->base_gfn + memslot->npages < end_gfn) { + ret = -EINVAL; + goto out; + } + + tmp_page = alloc_page(GFP_KERNEL); + if (!tmp_page) { + ret = -ENOMEM; + goto out; + } + + mmap_read_lock(current->mm); + + while (ipa < ipa_end) { + struct vm_area_struct *vma; + unsigned long hva; + struct page *page; + kvm_pfn_t pfn; + + hva = gfn_to_hva_memslot(memslot, gpa_to_gfn(ipa)); + vma = vma_lookup(current->mm, hva); + if (!vma) { + ret = -EFAULT; + break; + } + + pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa)); + + if (is_error_pfn(pfn)) { + ret = -EFAULT; + break; + } + + page = pfn_to_page(pfn); + + ret = realm_create_protected_data_page(realm, ipa, + page, + tmp_page, + data_flags); + if (ret) { + kvm_release_page_clean(page); + break; + } + + ipa += PAGE_SIZE; + kvm_release_pfn_dirty(pfn); + } +out: + mmap_read_unlock(current->mm); + __free_page(tmp_page); + srcu_read_unlock(&kvm->srcu, idx); + return ret; +} + +static int kvm_populate_realm(struct kvm *kvm, + struct arm_rme_populate_realm *args) +{ + phys_addr_t ipa_base, ipa_end; + unsigned long data_flags = 0; + + if (kvm_realm_state(kvm) != REALM_STATE_NEW) + return -EPERM; + + if (!IS_ALIGNED(args->base, PAGE_SIZE) || + !IS_ALIGNED(args->size, PAGE_SIZE) || + (args->flags & ~RMI_MEASURE_CONTENT)) + return -EINVAL; + + ipa_base = args->base; + ipa_end = ipa_base + args->size; + + if (ipa_end < ipa_base) + return -EINVAL; + + if (args->flags & RMI_MEASURE_CONTENT) + data_flags |= RMI_MEASURE_CONTENT; + + /* + * Perform the population in parts to ensure locks are not held for too + * long + */ + while (ipa_base < ipa_end) { + phys_addr_t end = min(ipa_end, ipa_base + SZ_2M); + + int ret = populate_region(kvm, ipa_base, end, + args->flags); + + if (ret) + return ret; + + ipa_base = end; + + cond_resched(); + } + + return 0; +} + +static int realm_set_ipa_state(struct kvm_vcpu *vcpu, + unsigned long start, + unsigned long end, + unsigned long ripas, + unsigned long *top_ipa) +{ + struct kvm *kvm = vcpu->kvm; + struct realm *realm = &kvm->arch.realm; + struct realm_rec *rec = &vcpu->arch.rec; + phys_addr_t rd_phys = virt_to_phys(realm->rd); + phys_addr_t rec_phys = virt_to_phys(rec->rec_page); + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + unsigned long ipa = start; + int ret = 0; + + while (ipa < end) { + unsigned long next; + + ret = rmi_rtt_set_ripas(rd_phys, rec_phys, ipa, end, &next); + + if (RMI_RETURN_STATUS(ret) == RMI_SUCCESS) { + ipa = next; + } else if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) { + int walk_level = RMI_RETURN_INDEX(ret); + int level = find_map_level(realm, ipa, end); + + /* + * If the RMM walk ended early then more tables are + * needed to reach the required depth to set the RIPAS. 
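+ *
+ * Worked example (illustrative): a 4k change needing a level 3 RTTE
+ * while the RMM walk stopped at level 1 (walk_level == 1,
+ * find_map_level() == 3) creates RTTs for levels 2 and 3 from the
+ * memcache and then retries the RMI call.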
+ */ + if (walk_level < level) { + ret = realm_create_rtt_levels(realm, ipa, + walk_level, + level, + memcache); + /* Retry with RTTs created */ + if (!ret) + continue; + } else { + ret = -EINVAL; + } + + break; + } else { + WARN(1, "Unexpected error in %s: %#x\n", __func__, + ret); + ret = -ENXIO; + break; + } + } + + *top_ipa = ipa; + + if (ripas == RMI_EMPTY && ipa != start) + realm_unmap_private_range(kvm, start, ipa); + + return ret; +} + +static int realm_init_ipa_state(struct realm *realm, + unsigned long ipa, + unsigned long end) +{ + phys_addr_t rd_phys = virt_to_phys(realm->rd); + int ret; + + while (ipa < end) { + unsigned long next; + + ret = rmi_rtt_init_ripas(rd_phys, ipa, end, &next); + + if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) { + int err_level = RMI_RETURN_INDEX(ret); + int level = find_map_level(realm, ipa, end); + + if (WARN_ON(err_level >= level)) + return -ENXIO; + + ret = realm_create_rtt_levels(realm, ipa, + err_level, + level, NULL); + if (ret) + return ret; + /* Retry with the RTT levels in place */ + continue; + } else if (WARN_ON(ret)) { + return -ENXIO; + } + + ipa = next; + } + + return 0; +} + +static int kvm_init_ipa_range_realm(struct kvm *kvm, + struct arm_rme_init_ripas *args) +{ + gpa_t addr, end; + struct realm *realm = &kvm->arch.realm; + + addr = args->base; + end = addr + args->size; + + if (end < addr) + return -EINVAL; + + if (kvm_realm_state(kvm) != REALM_STATE_NEW) + return -EPERM; + + return realm_init_ipa_state(realm, addr, end); +} + +static int kvm_activate_realm(struct kvm *kvm) +{ + struct realm *realm = &kvm->arch.realm; + + if (kvm_realm_state(kvm) != REALM_STATE_NEW) + return -EINVAL; + + if (rmi_realm_activate(virt_to_phys(realm->rd))) + return -ENXIO; + + WRITE_ONCE(realm->state, REALM_STATE_ACTIVE); + return 0; +} + +/* Protects access to rme_vmid_bitmap */ +static DEFINE_SPINLOCK(rme_vmid_lock); +static unsigned long *rme_vmid_bitmap; + +static int rme_vmid_init(void) +{ + unsigned int vmid_count = 1 << kvm_get_vmid_bits(); + + rme_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL); + if (!rme_vmid_bitmap) { + kvm_err("%s: Couldn't allocate rme vmid bitmap\n", __func__); + return -ENOMEM; + } + + return 0; +} + +static int rme_vmid_reserve(void) +{ + int ret; + unsigned int vmid_count = 1 << kvm_get_vmid_bits(); + + spin_lock(&rme_vmid_lock); + ret = bitmap_find_free_region(rme_vmid_bitmap, vmid_count, 0); + spin_unlock(&rme_vmid_lock); + + return ret; +} + +static void rme_vmid_release(unsigned int vmid) +{ + spin_lock(&rme_vmid_lock); + bitmap_release_region(rme_vmid_bitmap, vmid, 0); + spin_unlock(&rme_vmid_lock); +} + +static int kvm_create_realm(struct kvm *kvm) +{ + struct realm *realm = &kvm->arch.realm; + int ret; + + if (!kvm_is_realm(kvm)) + return -EINVAL; + if (kvm_realm_is_created(kvm)) + return -EEXIST; + + ret = rme_vmid_reserve(); + if (ret < 0) + return ret; + realm->vmid = ret; + + ret = realm_create_rd(kvm); + if (ret) { + rme_vmid_release(realm->vmid); + return ret; + } + + WRITE_ONCE(realm->state, REALM_STATE_NEW); + + /* The realm is up, free the parameters. 
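+ *
+ * For context, a minimal VMM-side sketch of how this point is reached
+ * (cap handling lives in kvm_realm_enable_cap() below; &config is a
+ * VMM-side struct arm_rme_config):
+ *
+ *   struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_RME };
+ *
+ *   cap.args[0] = KVM_CAP_ARM_RME_CONFIG_REALM;   optional, repeatable
+ *   cap.args[1] = (__u64)&config;
+ *   ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+ *
+ *   cap.args[0] = KVM_CAP_ARM_RME_CREATE_REALM;   lands here
+ *   ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
+ *
+ * After this, kvm_rme_config_realm() refuses further changes (-EBUSY).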
*/ + free_page((unsigned long)realm->params); + realm->params = NULL; + + return 0; +} + +static int config_realm_hash_algo(struct realm *realm, + struct arm_rme_config *cfg) +{ + switch (cfg->hash_algo) { + case ARM_RME_CONFIG_MEASUREMENT_ALGO_SHA256: + if (!rme_has_feature(RMI_FEATURE_REGISTER_0_HASH_SHA_256)) + return -EINVAL; + break; + case ARM_RME_CONFIG_MEASUREMENT_ALGO_SHA512: + if (!rme_has_feature(RMI_FEATURE_REGISTER_0_HASH_SHA_512)) + return -EINVAL; + break; + default: + return -EINVAL; + } + realm->params->hash_algo = cfg->hash_algo; + return 0; +} + +static int config_realm_pmu(struct realm *realm, + struct arm_rme_config *cfg) +{ + int pmu_max_ctrs = u64_get_bits(rmm_feat_reg0, + RMI_FEATURE_REGISTER_0_PMU_NUM_CTRS); + + if (!rme_has_feature(RMI_FEATURE_REGISTER_0_PMU_EN)) + return -EINVAL; + + if (cfg->num_pmu_cntrs > pmu_max_ctrs) + return -EINVAL; + + realm->params->pmu_num_ctrs = cfg->num_pmu_cntrs; + realm->params->flags |= RMI_REALM_PARAM_FLAG_PMU; + + realm->pmu_enabled = true; + return 0; +} + +static int config_realm_debug(struct realm *realm, + struct arm_rme_config *cfg) +{ + if (cfg->num_brps > kvm_realm_get_num_brps()) + return -EINVAL; + + if (cfg->num_wrps > kvm_realm_get_num_wrps()) + return -EINVAL; + + realm->params->num_bps = cfg->num_brps; + realm->params->num_wps = cfg->num_wrps; + + return 0; +} + +static int kvm_rme_config_realm(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + struct arm_rme_config cfg; + struct realm *realm = &kvm->arch.realm; + int r = 0; + + if (kvm_realm_is_created(kvm)) + return -EBUSY; + + if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg))) + return -EFAULT; + + switch (cfg.cfg) { + case ARM_RME_CONFIG_RPV: + memcpy(&realm->params->rpv, &cfg.rpv, sizeof(cfg.rpv)); + break; + case ARM_RME_CONFIG_HASH_ALGO: + r = config_realm_hash_algo(realm, &cfg); + break; + case KVM_CAP_ARM_RME_CFG_PMU: + r = config_realm_pmu(realm, &cfg); + break; + case KVM_CAP_ARM_RME_CFG_DBG: + r = config_realm_debug(realm, &cfg); + break; + default: + r = -EINVAL; + } + + return r; +} + +int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + int r = 0; + + if (!kvm_is_realm(kvm)) + return -EINVAL; + + switch (cap->args[0]) { + case KVM_CAP_ARM_RME_CONFIG_REALM: + r = kvm_rme_config_realm(kvm, cap); + break; + case KVM_CAP_ARM_RME_CREATE_REALM: + r = kvm_create_realm(kvm); + break; + case KVM_CAP_ARM_RME_INIT_RIPAS_REALM: { + struct arm_rme_init_ripas args; + void __user *argp = u64_to_user_ptr(cap->args[1]); + + if (copy_from_user(&args, argp, sizeof(args))) { + r = -EFAULT; + break; + } + + r = kvm_init_ipa_range_realm(kvm, &args); + break; + } + case KVM_CAP_ARM_RME_POPULATE_REALM: { + struct arm_rme_populate_realm args; + void __user *argp = u64_to_user_ptr(cap->args[1]); + + if (copy_from_user(&args, argp, sizeof(args))) { + r = -EFAULT; + break; + } + + r = kvm_populate_realm(kvm, &args); + break; + } + case KVM_CAP_ARM_RME_ACTIVATE_REALM: + r = kvm_activate_realm(kvm); + break; + default: + r = -EINVAL; + break; + } + + return r; +} + +void kvm_destroy_realm(struct kvm *kvm) +{ + struct realm *realm = &kvm->arch.realm; + size_t pgd_size = kvm_pgtable_stage2_pgd_size(kvm->arch.vtcr); + int i; + + if (realm->params) { + free_page((unsigned long)realm->params); + realm->params = NULL; + } + + if (!kvm_realm_is_created(kvm)) + return; + + WRITE_ONCE(realm->state, REALM_STATE_DYING); + + if (realm->rd) { + phys_addr_t rd_phys = virt_to_phys(realm->rd); + + if (WARN_ON(rmi_realm_destroy(rd_phys))) + return; + 
free_delegated_granule(rd_phys); + realm->rd = NULL; + } + + rme_vmid_release(realm->vmid); + + for (i = 0; i < pgd_size; i += RMM_PAGE_SIZE) { + phys_addr_t pgd_phys = kvm->arch.mmu.pgd_phys + i; + + if (WARN_ON(rmi_granule_undelegate(pgd_phys))) + return; + } + + WRITE_ONCE(realm->state, REALM_STATE_DEAD); + + /* Now that the Realm is destroyed, free the entry level RTTs */ + kvm_free_stage2_pgd(&kvm->arch.mmu); +} + +static void kvm_complete_ripas_change(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + struct realm_rec *rec = &vcpu->arch.rec; + unsigned long base = rec->run->exit.ripas_base; + unsigned long top = rec->run->exit.ripas_top; + unsigned long ripas = rec->run->exit.ripas_value; + unsigned long top_ipa; + int ret; + + do { + kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, + kvm_mmu_cache_min_pages(kvm)); + write_lock(&kvm->mmu_lock); + ret = realm_set_ipa_state(vcpu, base, top, ripas, &top_ipa); + write_unlock(&kvm->mmu_lock); + + if (WARN_RATELIMIT(ret && ret != -ENOMEM, + "Unable to satisfy RIPAS_CHANGE for %#lx - %#lx, ripas: %#lx\n", + base, top, ripas)) + break; + + base = top_ipa; + } while (top_ipa < top); +} + +int kvm_rec_enter(struct kvm_vcpu *vcpu) +{ + struct realm_rec *rec = &vcpu->arch.rec; + + switch (rec->run->exit.exit_reason) { + case RMI_EXIT_HOST_CALL: + case RMI_EXIT_PSCI: + for (int i = 0; i < REC_RUN_GPRS; i++) + rec->run->enter.gprs[i] = vcpu_get_reg(vcpu, i); + break; + case RMI_EXIT_RIPAS_CHANGE: + kvm_complete_ripas_change(vcpu); + break; + } + + if (kvm_realm_state(vcpu->kvm) != REALM_STATE_ACTIVE) + return -EINVAL; + + return rmi_rec_enter(virt_to_phys(rec->rec_page), + virt_to_phys(rec->run)); +} + +static void free_rec_aux(struct page **aux_pages, + unsigned int num_aux) +{ + unsigned int i, j; + unsigned int page_count = 0; + + for (i = 0; i < num_aux;) { + struct page *aux_page = aux_pages[page_count++]; + phys_addr_t aux_page_phys = page_to_phys(aux_page); + bool should_free = true; + + for (j = 0; j < PAGE_SIZE && i < num_aux; j += RMM_PAGE_SIZE) { + if (WARN_ON(rmi_granule_undelegate(aux_page_phys))) + should_free = false; + aux_page_phys += RMM_PAGE_SIZE; + i++; + } + /* Only free if all the undelegate calls were successful */ + if (should_free) + __free_page(aux_page); + } +} + +static int alloc_rec_aux(struct page **aux_pages, + u64 *aux_phys_pages, + unsigned int num_aux) +{ + struct page *aux_page; + int page_count = 0; + unsigned int i, j; + int ret; + + for (i = 0; i < num_aux;) { + phys_addr_t aux_page_phys; + + aux_page = alloc_page(GFP_KERNEL); + if (!aux_page) { + ret = -ENOMEM; + goto out_err; + } + + aux_page_phys = page_to_phys(aux_page); + for (j = 0; j < PAGE_SIZE && i < num_aux; j += RMM_PAGE_SIZE) { + if (rmi_granule_delegate(aux_page_phys)) { + ret = -ENXIO; + goto err_undelegate; + } + aux_phys_pages[i++] = aux_page_phys; + aux_page_phys += RMM_PAGE_SIZE; + } + aux_pages[page_count++] = aux_page; + } + + return 0; +err_undelegate: + while (j > 0) { + j -= RMM_PAGE_SIZE; + i--; + if (WARN_ON(rmi_granule_undelegate(aux_phys_pages[i]))) { + /* Leak the page if the undelegate fails */ + goto out_err; + } + } + __free_page(aux_page); +out_err: + free_rec_aux(aux_pages, i); + return ret; +} + +int kvm_create_rec(struct kvm_vcpu *vcpu) +{ + struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu); + unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu); + struct realm *realm = &vcpu->kvm->arch.realm; + struct realm_rec *rec = &vcpu->arch.rec; + unsigned long rec_page_phys; + struct rec_params *params; + int r, i; + + 
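+	/*
+	 * RECs can only be created while the realm is in the NEW state,
+	 * i.e. after KVM_CAP_ARM_RME_CREATE_REALM but before the realm is
+	 * activated.
+	 */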
if (kvm_realm_state(vcpu->kvm) != REALM_STATE_NEW) + return -ENOENT; + + if (rec->run) + return -EBUSY; + + /* + * The RMM will report PSCI v1.0 to Realms and the KVM_ARM_VCPU_PSCI_0_2 + * flag covers v0.2 and onwards. + */ + if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) + return -EINVAL; + + if (kvm_vcpu_has_pmu(vcpu) != realm->pmu_enabled) + return -EINVAL; + + BUILD_BUG_ON(sizeof(*params) > PAGE_SIZE); + BUILD_BUG_ON(sizeof(*rec->run) > PAGE_SIZE); + + params = (struct rec_params *)get_zeroed_page(GFP_KERNEL); + rec->rec_page = (void *)__get_free_page(GFP_KERNEL); + rec->run = (void *)get_zeroed_page(GFP_KERNEL); + if (!params || !rec->rec_page || !rec->run) { + r = -ENOMEM; + goto out_free_pages; + } + + for (i = 0; i < ARRAY_SIZE(params->gprs); i++) + params->gprs[i] = vcpu_regs->regs[i]; + + params->pc = vcpu_regs->pc; + + if (vcpu->vcpu_id == 0) + params->flags |= REC_PARAMS_FLAG_RUNNABLE; + + rec_page_phys = virt_to_phys(rec->rec_page); + + if (rmi_granule_delegate(rec_page_phys)) { + r = -ENXIO; + goto out_free_pages; + } + + r = alloc_rec_aux(rec->aux_pages, params->aux, realm->num_aux); + if (r) + goto out_undelegate_rmm_rec; + + params->num_rec_aux = realm->num_aux; + params->mpidr = mpidr; + + if (rmi_rec_create(virt_to_phys(realm->rd), + rec_page_phys, + virt_to_phys(params))) { + r = -ENXIO; + goto out_free_rec_aux; + } + + rec->mpidr = mpidr; + + free_page((unsigned long)params); + return 0; + +out_free_rec_aux: + free_rec_aux(rec->aux_pages, realm->num_aux); +out_undelegate_rmm_rec: + if (WARN_ON(rmi_granule_undelegate(rec_page_phys))) + rec->rec_page = NULL; +out_free_pages: + free_page((unsigned long)rec->run); + free_page((unsigned long)rec->rec_page); + free_page((unsigned long)params); + return r; +} + +void kvm_destroy_rec(struct kvm_vcpu *vcpu) +{ + struct realm *realm = &vcpu->kvm->arch.realm; + struct realm_rec *rec = &vcpu->arch.rec; + unsigned long rec_page_phys; + + if (!vcpu_is_rec(vcpu)) + return; + + if (!rec->run) { + /* Nothing to do if the VCPU hasn't been finalized */ + return; + } + + free_page((unsigned long)rec->run); + + rec_page_phys = virt_to_phys(rec->rec_page); + + /* + * The REC and any AUX pages cannot be reclaimed until the REC is + * destroyed. So if the REC destroy fails then the REC page and any AUX + * pages will be leaked. 
+ */ + if (WARN_ON(rmi_rec_destroy(rec_page_phys))) + return; + + free_rec_aux(rec->aux_pages, realm->num_aux); + + free_delegated_granule(rec_page_phys); +} + +int kvm_init_realm_vm(struct kvm *kvm) +{ + kvm->arch.realm.params = (void *)get_zeroed_page(GFP_KERNEL); + + if (!kvm->arch.realm.params) + return -ENOMEM; + return 0; +} + +void kvm_init_rme(void) +{ + if (PAGE_SIZE != SZ_4K) + /* Only 4k page size on the host is supported */ + return; + + if (rmi_check_version()) + /* Continue without realm support */ + return; + + if (WARN_ON(rmi_features(0, &rmm_feat_reg0))) + return; + + if (rme_vmid_init()) + return; + + static_branch_enable(&kvm_rme_is_available); +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 8cd4c95759a02df30dabff05317ccbb3c2559eba..304468d3c6d27cb4ed74a492cc9ac2bd91a48b88 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1526,7 +1526,7 @@ static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, /* Hide BRBE from guests */ val &= ~ID_AA64DFR0_EL1_BRBE_MASK; - return val; + return kvm_realm_reset_id_aa64dfr0_el1(vcpu, val); } static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, @@ -3614,18 +3614,18 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); } -static unsigned int num_demux_regs(void) +static unsigned int num_demux_regs(struct kvm_vcpu *vcpu) { - return CSSELR_MAX; + return kvm_is_realm(vcpu->kvm) ? 0 : CSSELR_MAX; } -static int write_demux_regids(u64 __user *uindices) +static int write_demux_regids(struct kvm_vcpu *vcpu, u64 __user *uindices) { u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; unsigned int i; val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; - for (i = 0; i < CSSELR_MAX; i++) { + for (i = 0; i < num_demux_regs(vcpu); i++) { if (put_user(val | i, uindices)) return -EFAULT; uindices++; @@ -3633,6 +3633,24 @@ static int write_demux_regids(u64 __user *uindices) return 0; } +static unsigned int num_invariant_regs(struct kvm_vcpu *vcpu) +{ + return kvm_is_realm(vcpu->kvm) ? 0 : ARRAY_SIZE(invariant_sys_regs); +} + +static int write_invariant_regids(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + unsigned int i; + + for (i = 0; i < num_invariant_regs(vcpu); i++) { + if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) + return -EFAULT; + uindices++; + } + return 0; +} + + static u64 sys_reg_to_index(const struct sys_reg_desc *reg) { return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | @@ -3656,11 +3674,27 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) return true; } +static bool kvm_realm_sys_reg_hidden_user(const struct kvm_vcpu *vcpu, u64 reg) +{ + if (!kvm_is_realm(vcpu->kvm)) + return false; + + switch (reg) { + case SYS_ID_AA64DFR0_EL1: + case SYS_PMCR_EL0: + return false; + } + return true; +} + static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 __user **uind, unsigned int *total) { + if (kvm_realm_sys_reg_hidden_user(vcpu, reg_to_encoding(rd))) + return 0; + /* * Ignore registers we trap but don't save, * and for which no custom user accessor is provided. 
@@ -3698,8 +3732,8 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) { - return ARRAY_SIZE(invariant_sys_regs) - + num_demux_regs() + return num_invariant_regs(vcpu) + + num_demux_regs(vcpu) + walk_sys_regs(vcpu, (u64 __user *)NULL); } @@ -3708,19 +3742,17 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) unsigned int i; int err; - /* Then give them all the invariant registers' indices. */ - for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { - if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) - return -EFAULT; - uindices++; - } + err = write_invariant_regids(vcpu, uindices); + if (err) + return err; + uindices += num_invariant_regs(vcpu); err = walk_sys_regs(vcpu, uindices); if (err < 0) return err; uindices += err; - return write_demux_regids(uindices); + return write_demux_regids(vcpu, uindices); } int __init kvm_sys_reg_table_init(void) diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index 9ec452bcfb13e1a51c3e7f8ab645a16782176c3b..14c4ce07523287e724b199274c8ad3f42e2c1e83 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -103,7 +103,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) * the proper checks already. */ if (type == KVM_DEV_TYPE_ARM_VGIC_V2 && - !kvm_vgic_global_state.can_emulate_gicv2) + (!kvm_vgic_global_state.can_emulate_gicv2 || kvm_is_realm(kvm))) return -ENODEV; /* Must be held to avoid race with vCPU creation */ diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index dab599e857b51e8eba1dd15f4c1df614103d655f..b45416bdbb656f6b25c0bdbea391e9b4d512bd6f 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -7,10 +7,11 @@ #include #include #include +#include #include #include #include -#include +#include #include "vgic.h" @@ -681,10 +682,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info) (unsigned long long)info->vcpu.start); } else if (kvm_get_mode() != KVM_MODE_PROTECTED) { kvm_vgic_global_state.vcpu_base = info->vcpu.start; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (!static_branch_unlikely(&virtcca_cvm_is_available)) -#endif - kvm_vgic_global_state.can_emulate_gicv2 = true; + kvm_vgic_global_state.can_emulate_gicv2 = true; ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); if (ret) { kvm_err("Cannot register GICv2 KVM device.\n"); @@ -764,13 +762,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - cpu_if->vgic_vmcr = - ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_exit.gicv3_vmcr; - return; - } -#endif + if (likely(cpu_if->vgic_sre)) cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); } @@ -779,6 +771,9 @@ void vgic_v3_put(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + if (vcpu_is_rec(vcpu)) + cpu_if->vgic_vmcr = vcpu->arch.rec.run->exit.gicv3_vmcr; + WARN_ON(vgic_v4_put(vcpu)); vgic_v3_vmcr_sync(vcpu); diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index ec110006acf547b3f53636ebc9cf94a1c7e8abcc..eee76ab9203e541212414f27af1c91192ec11065 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -10,8 +10,9 @@ #include #include +#include #include -#include +#include #include "vgic.h" @@ -22,6 +23,13 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { 
.gicv3_cpuif = STATIC_KEY_FALSE_INIT, }; +static inline int kvm_vcpu_vgic_nr_lr(struct kvm_vcpu *vcpu) +{ + if (unlikely(vcpu_is_rec(vcpu))) + return kvm_realm_vgic_nr_lr(); + return kvm_vgic_global_state.nr_lr; +} + /* * Locking order is always: * kvm->lock (mutex) @@ -841,7 +849,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) lockdep_assert_held(&vgic_cpu->ap_list_lock); count = compute_ap_list_depth(vcpu, &multi_sgi); - if (count > kvm_vgic_global_state.nr_lr || multi_sgi) + if (count > kvm_vcpu_vgic_nr_lr(vcpu) || multi_sgi) vgic_sort_ap_list(vcpu); count = 0; @@ -870,7 +878,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) raw_spin_unlock(&irq->irq_lock); - if (count == kvm_vgic_global_state.nr_lr) { + if (count == kvm_vcpu_vgic_nr_lr(vcpu)) { if (!list_is_last(&irq->ap_list, &vgic_cpu->ap_list_head)) vgic_set_underflow(vcpu); @@ -879,7 +887,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) } /* Nuke remaining LRs */ - for (i = count ; i < kvm_vgic_global_state.nr_lr; i++) + for (i = count ; i < kvm_vcpu_vgic_nr_lr(vcpu); i++) vgic_clear_lr(vcpu, i); if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) @@ -898,44 +906,25 @@ static inline bool can_access_vgic_from_kernel(void) return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe(); } -#ifdef CONFIG_HISI_VIRTCCA_HOST -static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu) +static inline void vgic_rmm_save_state(struct kvm_vcpu *vcpu) { - int i; struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; - - for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { - cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i]; - tec_run->tec_entry.gicv3_lrs[i] = 0; - } -} - -static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu) -{ int i; - struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; - for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { - tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i]; - tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + for (i = 0; i < kvm_vcpu_vgic_nr_lr(vcpu); i++) { + cpu_if->vgic_lr[i] = vcpu->arch.rec.run->exit.gicv3_lrs[i]; + vcpu->arch.rec.run->enter.gicv3_lrs[i] = 0; } } -#endif static inline void vgic_save_state(struct kvm_vcpu *vcpu) { if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_save_state(vcpu); + else if (vcpu_is_rec(vcpu)) + vgic_rmm_save_state(vcpu); else -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - vgic_tmm_save_state(vcpu); - else -#endif - __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); - + __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); } /* Sync back the hardware VGIC state into our emulation after a guest's run. 
*/ @@ -960,17 +949,30 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) vgic_prune_ap_list(vcpu); } +static inline void vgic_rmm_restore_state(struct kvm_vcpu *vcpu) +{ + struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + int i; + + for (i = 0; i < kvm_vcpu_vgic_nr_lr(vcpu); i++) { + vcpu->arch.rec.run->enter.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + /* + * Also populate the rec.run->exit copies so that a late + * decision to back out from entering the realm doesn't cause + * the state to be lost + */ + vcpu->arch.rec.run->exit.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + } +} + static inline void vgic_restore_state(struct kvm_vcpu *vcpu) { if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_restore_state(vcpu); + else if (vcpu_is_rec(vcpu)) + vgic_rmm_restore_state(vcpu); else -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - vgic_tmm_restore_state(vcpu); - else -#endif - __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); + __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); } /* Flush our emulation state into the GIC hardware before entering the guest. */ @@ -1009,13 +1011,15 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_vgic_load(struct kvm_vcpu *vcpu) { - if (unlikely(!vgic_initialized(vcpu->kvm))) + if (unlikely(!irqchip_in_kernel(vcpu->kvm) || + !vgic_initialized(vcpu->kvm) || + vcpu_is_rec(vcpu))) { + if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) + __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3); return; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return; -#endif - if (kvm_vgic_global_state.type == VGIC_V2) + } + + if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_load(vcpu); else vgic_v3_load(vcpu); @@ -1023,13 +1027,15 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu) void kvm_vgic_put(struct kvm_vcpu *vcpu) { - if (unlikely(!vgic_initialized(vcpu->kvm))) + if (unlikely(!irqchip_in_kernel(vcpu->kvm) || + !vgic_initialized(vcpu->kvm) || + vcpu_is_rec(vcpu))) { + if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) + __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3); return; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return; -#endif - if (kvm_vgic_global_state.type == VGIC_V2) + } + + if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_put(vcpu); else vgic_v3_put(vcpu); diff --git a/arch/arm64/kvm/virtcca_cvm_exit.c b/arch/arm64/kvm/virtcca_cvm_exit.c deleted file mode 100644 index 9654375a9c8cbce9ee8b370e74c8533ef49e043c..0000000000000000000000000000000000000000 --- a/arch/arm64/kvm/virtcca_cvm_exit.c +++ /dev/null @@ -1,221 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#include -#include -#include - -#include -#include -#include - -typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu); - -static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu, bool unmask_ctl) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = run->tec_exit.cntv_ctl; - __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = run->tec_exit.cntv_cval; - __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = run->tec_exit.cntp_ctl; - __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = run->tec_exit.cntp_cval; - - /* Because the timer mask is tainted by TMM, we don't know the - * true intent of the guest. Here, we assume mask is always - * cleared during WFI. 
- */ - if (unmask_ctl) { - __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK; - __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK; - } - - kvm_cvm_timers_update(vcpu); -} - -static int tec_exit_reason_notimpl(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - pr_err("[vcpu %d] Unhandled exit reason from cvm (ESR: %#llx)\n", - vcpu->vcpu_id, run->tec_exit.esr); - return -ENXIO; -} - -/* The process is the same as kvm_handle_wfx, - * except the tracing and updating operation for pc, - * we copy kvm_handle_wfx process here - * to avoid changing kvm_handle_wfx function. - */ -static int tec_exit_wfx(struct kvm_vcpu *vcpu) -{ - u64 esr = kvm_vcpu_get_esr(vcpu); - - if (esr & ESR_ELx_WFx_ISS_WFE) { - vcpu->stat.wfe_exit_stat++; - } else { - vcpu->stat.wfi_exit_stat++; - } - - if (esr & ESR_ELx_WFx_ISS_WFxT) { - if (esr & ESR_ELx_WFx_ISS_RV) { - u64 val, now; - - now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT); - val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu)); - - if (now >= val) - goto out; - } else { - /* Treat WFxT as WFx if RN is invalid */ - esr &= ~ESR_ELx_WFx_ISS_WFxT; - } - } - - if (esr & ESR_ELx_WFx_ISS_WFE) { - kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu)); - } else { - if (esr & ESR_ELx_WFx_ISS_WFxT) - vcpu_set_flag(vcpu, IN_WFIT); - - kvm_vcpu_wfi(vcpu); - } - -out: - return 1; -} - -static int tec_exit_sys_reg(struct kvm_vcpu *vcpu) -{ - int ret; - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - unsigned long esr = kvm_vcpu_get_esr(vcpu); - int rt = kvm_vcpu_sys_get_rt(vcpu); - bool is_write = !(esr & 1); - - if (is_write) - vcpu_set_reg(vcpu, rt, run->tec_exit.gprs[0]); - - ret = kvm_handle_sys_reg(vcpu); - - if (ret >= 0 && !is_write) - run->tec_entry.gprs[0] = vcpu_get_reg(vcpu, rt); - - return ret; -} - -static int tec_exit_sync_dabt(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) { - vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), - run->tec_exit.gprs[0]); - } - return kvm_handle_guest_abort(vcpu); -} - -static int tec_exit_sync_iabt(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n", - vcpu->vcpu_id, run->tec_exit.esr); - - return -ENXIO; -} - -static exit_handler_fn tec_exit_handlers[] = { - [0 ... 
ESR_ELx_EC_MAX] = tec_exit_reason_notimpl, - [ESR_ELx_EC_WFx] = tec_exit_wfx, - [ESR_ELx_EC_SYS64] = tec_exit_sys_reg, - [ESR_ELx_EC_DABT_LOW] = tec_exit_sync_dabt, - [ESR_ELx_EC_IABT_LOW] = tec_exit_sync_iabt -}; - -static int tec_exit_psci(struct kvm_vcpu *vcpu) -{ - int i; - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) - vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]); - - return kvm_psci_call(vcpu); -} - -static int tec_exit_host_call(struct kvm_vcpu *vcpu) -{ - int ret, i; - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - vcpu->stat.hvc_exit_stat++; - - for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) - vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]); - - ret = kvm_smccc_call_handler(vcpu); - - if (ret < 0) { - vcpu_set_reg(vcpu, 0, ~0UL); - ret = 1; - } - for (i = 0; i < TEC_EXIT_NR_GPRS; ++i) - run->tec_entry.gprs[i] = vcpu_get_reg(vcpu, i); - - return ret; -} - -/* - * Return > 0 to return to guest, < 0 on error, 0(and set exit_reason) on - * proper exit to userspace - */ - -int handle_cvm_exit(struct kvm_vcpu *vcpu, int tec_run_ret) -{ - unsigned long status; - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - u8 esr_ec = ESR_ELx_EC(run->tec_exit.esr); - bool is_wfx; - - status = TMI_RETURN_STATUS(tec_run_ret); - - if (status == TMI_ERROR_CVM_POWEROFF) { - vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; - vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN; - return 0; - } - - if (status == TMI_ERROR_CVM_STATE) { - vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; - return 0; - } - - if (tec_run_ret) - return -ENXIO; - - vcpu->arch.fault.esr_el2 = run->tec_exit.esr; - vcpu->arch.fault.far_el2 = run->tec_exit.far; - vcpu->arch.fault.hpfar_el2 = run->tec_exit.hpfar; - - is_wfx = (run->tec_exit.exit_reason == TMI_EXIT_SYNC) && (esr_ec == ESR_ELx_EC_WFx); - update_arch_timer_irq_lines(vcpu, is_wfx); - - run->tec_entry.flags = 0; - - switch (run->tec_exit.exit_reason) { - case TMI_EXIT_FIQ: - case TMI_EXIT_IRQ: - return 1; - case TMI_EXIT_PSCI: - return tec_exit_psci(vcpu); - case TMI_EXIT_SYNC: - return tec_exit_handlers[esr_ec](vcpu); - case TMI_EXIT_HOST_CALL: - return tec_exit_host_call(vcpu); - } - - kvm_pr_unimpl("Unsupported exit reason : 0x%llx\n", - run->tec_exit.exit_reason); - return 0; -} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 4ea07caba71cf26db93de00e22baf387186733b9..647bbd9bad5b70eb6ba2aeb2987c057515c5540c 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -898,6 +898,25 @@ static int do_tag_check_fault(unsigned long far, unsigned long esr, return 0; } +static int do_gpf_ptw(unsigned long far, unsigned long esr, struct pt_regs *regs) +{ + const struct fault_info *inf = esr_to_fault_info(esr); + + die_kernel_fault(inf->name, far, esr, regs); + return 0; +} + +static int do_gpf(unsigned long far, unsigned long esr, struct pt_regs *regs) +{ + const struct fault_info *inf = esr_to_fault_info(esr); + + if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) + return 0; + + arm64_notify_die(inf->name, regs, inf->sig, inf->code, far, esr); + return 0; +} + static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" }, { do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" }, @@ -934,12 +953,12 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 32" }, { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 34" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 35" }, - { 
do_bad, SIGKILL, SI_KERNEL, "unknown 36" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 37" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 38" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 39" }, - { do_bad, SIGKILL, SI_KERNEL, "unknown 40" }, + { do_gpf_ptw, SIGKILL, SI_KERNEL, "Granule Protection Fault at level -1" }, + { do_gpf_ptw, SIGKILL, SI_KERNEL, "Granule Protection Fault at level 0" }, + { do_gpf_ptw, SIGKILL, SI_KERNEL, "Granule Protection Fault at level 1" }, + { do_gpf_ptw, SIGKILL, SI_KERNEL, "Granule Protection Fault at level 2" }, + { do_gpf_ptw, SIGKILL, SI_KERNEL, "Granule Protection Fault at level 3" }, + { do_gpf, SIGBUS, SI_KERNEL, "Granule Protection Fault not on table walk" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 41" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 42" }, { do_bad, SIGKILL, SI_KERNEL, "unknown 43" }, diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 57cd2d1a9a18ecce88321db27e82d9b382efbff9..247b038ff4d9ed9d5465eefc25ad22dbaf6ac676 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -770,7 +770,6 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) return 0; } -#ifdef CONFIG_HISI_VIRTCCA_HOST void arm_pmu_set_phys_irq(bool enable) { int cpu = get_cpu(); @@ -785,7 +784,6 @@ void arm_pmu_set_phys_irq(bool enable) put_cpu(); } -#endif #ifdef CONFIG_CPU_PM static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 9d3f034bd8855e8675529dd667fd8f16398e80e2..b527172e8916d6de66f720e0fc6802f3cde0141c 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -129,6 +129,8 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); +void kvm_realm_timers_update(struct kvm_vcpu *vcpu); + u64 kvm_phys_timer_read(void); void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 6e55b9283789b148f76030e63de34de03fc35cc0..bbeb68f031be44cad3e0f846c84c2b1d5e146f02 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -10,6 +10,8 @@ #include #include +#include + #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) #define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fa0542624f42866495ff32d37ca4ae8d192e4baa..ad66b8825c652915f79e7fbd9b4faec02c2cda98 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -259,11 +259,17 @@ union kvm_mmu_notifier_arg { pte_t pte; }; +enum kvm_gfn_range_filter { + KVM_FILTER_SHARED = BIT(0), + KVM_FILTER_PRIVATE = BIT(1), +}; + struct kvm_gfn_range { struct kvm_memory_slot *slot; gfn_t start; gfn_t end; union kvm_mmu_notifier_arg arg; + enum kvm_gfn_range_filter attr_filter; bool may_block; }; bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); @@ -544,28 +550,6 @@ static __always_inline void guest_state_exit_irqoff(void) instrumentation_end(); } -#ifdef CONFIG_HISI_VIRTCCA_HOST - -#define KVM_TYPE_CVM_BIT 8 -#define CVM_MAX_HALT_POLL_NS 100000 - -DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_available); - -static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu) -{ - if (static_branch_unlikely(&virtcca_cvm_is_available)) - return vcpu->arch.tec.tec_run; - - return false; -} - -static inline bool 
kvm_arm_cvm_type(unsigned long type) -{ - return type & (1UL << KVM_TYPE_CVM_BIT); -} - -#endif - static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { /* diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index ac84689cc11cf29b1e8b03a526464890d4da55e3..475c5cdf13c8c5532f66a7fdd0aa919270834f66 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -209,6 +209,7 @@ void kvm_host_pmu_init(struct arm_pmu *pmu); #endif bool arm_pmu_irq_is_nmi(void); +void arm_pmu_set_phys_irq(bool enable); /* Internal functions only for core arm_pmu code */ struct arm_pmu *armpmu_alloc(void); @@ -219,9 +220,13 @@ void armpmu_free_irq(int irq, int cpu); #define ARMV8_PMU_PDEV_NAME "armv8-pmu" +#else /* CONFIG_ARM_PMU */ + +static inline void arm_pmu_set_phys_irq(bool enable) {} + #endif /* CONFIG_ARM_PMU */ -#if defined(CONFIG_ARM_PMU) && defined(CONFIG_HISI_VIRTCCA_HOST) +#if defined(CONFIG_ARM_PMU) void arm_pmu_set_phys_irq(bool enable); #else #define arm_pmu_set_phys_irq(x) do {} while (0) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 0b24b78a78406fa242202b5d3bc25fd3edf03376..99a9c164d02234beffa4e7ca3371ce2c4e6fd33f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -925,14 +925,25 @@ struct kvm_ppc_resize_hpt { #define KVM_S390_SIE_PAGE_OFFSET 1 /* - * On arm64, machine type can be used to request the physical - * address size for the VM. Bits[7-0] are reserved for the guest - * PA size shift (i.e, log2(PA_Size)). For backward compatibility, - * value 0 implies the default IPA size, 40bits. + * On arm64, machine type can be used to request both the machine type and + * the physical address size for the VM. + * + * Bits[11-8] are reserved for the ARM specific machine type. + * + * Bits[7-0] are reserved for the guest PA size shift (i.e, log2(PA_Size)). + * For backward compatibility, value 0 implies the default IPA size, 40bits. */ +#define KVM_VM_TYPE_ARM_SHIFT 8 +#define KVM_VM_TYPE_ARM_MASK (0xfULL << KVM_VM_TYPE_ARM_SHIFT) +#define KVM_VM_TYPE_ARM(_type) \ + (((_type) << KVM_VM_TYPE_ARM_SHIFT) & KVM_VM_TYPE_ARM_MASK) +#define KVM_VM_TYPE_ARM_NORMAL KVM_VM_TYPE_ARM(0) +#define KVM_VM_TYPE_ARM_REALM KVM_VM_TYPE_ARM(1) + #define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL #define KVM_VM_TYPE_ARM_IPA_SIZE(x) \ ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK) + /* * ioctls for /dev/kvm fds: */ @@ -1206,6 +1217,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_COUNTER_OFFSET 227 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_RME 240 #define KVM_CAP_SEV_ES_GHCB 500 #define KVM_CAP_HYGON_COCO_EXT 501 @@ -1533,25 +1545,6 @@ struct kvm_numa_info { #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) -#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data) - -#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */ - -struct kvm_user_data { - __u64 loader_start; - /* - * When the lowest bit of dtb_info is 0, the value of dtb_info represents the size of the - * DTB, and data_start and data_size represent the address base and size of the MMIO. - * When the lowest bit of dtb_info is 1, data_start and data_size represent the address base - * and size of the DTB. 
- */ - __u64 dtb_info; - __u64 data_start; - __u64 data_size; - __u64 ram_size; - struct kvm_numa_info numa_info; -}; - /* enable ucontrol for s390 */ struct kvm_s390_ucas_mapping { __u64 user_addr; @@ -2457,4 +2450,13 @@ struct kvm_csv3_handle_memory { /* get tmi version */ #define KVM_GET_TMI_VERSION _IOR(KVMIO, 0xd2, u64) +/* Available with KVM_CAP_ARM_RME, only for VMs with KVM_VM_TYPE_ARM_REALM */ +struct kvm_arm_rmm_psci_complete { + __u64 target_mpidr; + __u32 psci_status; + __u32 padding[3]; +}; + +#define KVM_ARM_VCPU_RMM_PSCI_COMPLETE _IOW(KVMIO, 0xd6, struct kvm_arm_rmm_psci_complete) + #endif /* __LINUX_KVM_H */ diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 79f73ebb728df2df7b855d9c5e8f771ecd04fa8a..9e351bce483ede3c0da2336a2304a015834fc663 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -633,6 +633,11 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, */ gfn_range.arg = range->arg; gfn_range.may_block = range->may_block; + /* + * HVA-based notifications aren't relevant to private + * mappings as they don't have a userspace mapping. + */ + gfn_range.attr_filter = KVM_FILTER_SHARED; /* * {gfn(page) | page intersects with [hva_start, hva_end)} =