diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index f712ec22001bd359dc329ef6c58a611dfd48e110..465d28f5480e9139068a7dcdd46725dbee8fb7a1 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -766,6 +766,7 @@ CONFIG_HAVE_KVM_IRQ_BYPASS=y
 CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y
 CONFIG_KVM_XFER_TO_GUEST_WORK=y
 CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
+CONFIG_HAVE_KVM_PINNED_VMID=y
 CONFIG_KVM_HISI_VIRT=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=y
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 81898bb87c5e999375bfd9e23c80f48b976509aa..8335e84a8d0989430a7059c4aa36b28c970b01bb 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -143,6 +143,7 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
 
 struct kvm_vmid {
 	atomic64_t id;
+	KABI_EXTEND(refcount_t pinned)
 };
 
 struct kvm_s2_mmu {
@@ -1106,6 +1107,8 @@ int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
 void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
+unsigned long kvm_arm_pinned_vmid_get(struct kvm_vmid *kvm_vmid);
+void kvm_arm_pinned_vmid_put(struct kvm_vmid *kvm_vmid);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 52edbd7f6340cdb39d257a4834c4cf7997247231..d4740f693fdf2c56a23699f694add538d2551581 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -44,6 +44,7 @@ menuconfig KVM
 	select GUEST_PERF_EVENTS if PERF_EVENTS
 	select INTERVAL_TREE
 	select XARRAY_MULTI
+	select HAVE_KVM_PINNED_VMID
 	help
 	  Support hosting virtualized guest machines.
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d0d4e6bdc06bb716627ca67a6a1a3dcde1a99a75..08cfe550c155c3af27aff331f95441563294556f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -892,6 +892,20 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+int kvm_arch_pinned_vmid_get(struct kvm *kvm)
+{
+	int vmid;
+
+	vmid = kvm_arm_pinned_vmid_get(&kvm->arch.mmu.vmid);
+
+	return (vmid == 0) ? -EINVAL : vmid;
+}
+
+void kvm_arch_pinned_vmid_put(struct kvm *kvm)
+{
+	kvm_arm_pinned_vmid_put(&kvm->arch.mmu.vmid);
+}
+
 bool kvm_arch_intc_initialized(struct kvm *kvm)
 {
 	return vgic_initialized(kvm);
diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
index 7fe8ba1a2851c5b71acbf17075987b96436f1a4a..4e956aaa575e44090932f888191d2dc4033b0642 100644
--- a/arch/arm64/kvm/vmid.c
+++ b/arch/arm64/kvm/vmid.c
@@ -25,6 +25,10 @@ static unsigned long *vmid_map;
 static DEFINE_PER_CPU(atomic64_t, active_vmids);
 static DEFINE_PER_CPU(u64, reserved_vmids);
 
+static unsigned long max_pinned_vmids;
+static unsigned long nr_pinned_vmids;
+static unsigned long *pinned_vmid_map;
+
 #define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
 #define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)
 
@@ -47,7 +51,10 @@ static void flush_context(void)
 	int cpu;
 	u64 vmid;
 
-	bitmap_zero(vmid_map, NUM_USER_VMIDS);
+	if (pinned_vmid_map)
+		bitmap_copy(vmid_map, pinned_vmid_map, NUM_USER_VMIDS);
+	else
+		bitmap_zero(vmid_map, NUM_USER_VMIDS);
 
 	for_each_possible_cpu(cpu) {
 		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
@@ -103,6 +110,14 @@ static u64 new_vmid(struct kvm_vmid *kvm_vmid)
 		return newvmid;
 	}
 
+	/*
+	 * If it is pinned, we can keep using it. Note that reserved
+	 * takes priority, because even if it is also pinned, we need to
+	 * update the generation in reserved_vmids.
+	 */
+	if (refcount_read(&kvm_vmid->pinned))
+		return newvmid;
+
 	if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
 		atomic64_set(&kvm_vmid->id, newvmid);
 		return newvmid;
@@ -169,6 +184,63 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
 }
 
+unsigned long kvm_arm_pinned_vmid_get(struct kvm_vmid *kvm_vmid)
+{
+	unsigned long flags;
+	u64 vmid;
+
+	if (!pinned_vmid_map)
+		return 0;
+
+	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
+
+	vmid = atomic64_read(&kvm_vmid->id);
+
+	if (refcount_inc_not_zero(&kvm_vmid->pinned))
+		goto out_unlock;
+
+	if (nr_pinned_vmids >= max_pinned_vmids) {
+		vmid = 0;
+		goto out_unlock;
+	}
+
+	/*
+	 * If we went through one or more rollovers since that VMID was
+	 * used, make sure it is still valid, or generate a new one.
+	 */
+	if (!vmid_gen_match(vmid))
+		vmid = new_vmid(kvm_vmid);
+
+	nr_pinned_vmids++;
+	__set_bit(vmid2idx(vmid), pinned_vmid_map);
+	refcount_set(&kvm_vmid->pinned, 1);
+
+out_unlock:
+	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+
+	vmid &= ~VMID_MASK;
+
+	return vmid;
+}
+
+void kvm_arm_pinned_vmid_put(struct kvm_vmid *kvm_vmid)
+{
+	unsigned long flags;
+	u64 vmid = atomic64_read(&kvm_vmid->id);
+
+	if (!pinned_vmid_map)
+		return;
+
+	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
+
+	if (refcount_dec_and_test(&kvm_vmid->pinned)) {
+		__clear_bit(vmid2idx(vmid), pinned_vmid_map);
+		nr_pinned_vmids--;
+	}
+
+	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+}
+
 /*
  * Initialize the VMID allocator
  */
@@ -186,10 +258,20 @@ int __init kvm_arm_vmid_alloc_init(void)
 	if (!vmid_map)
 		return -ENOMEM;
 
+	pinned_vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
+	nr_pinned_vmids = 0;
+
+	/*
+	 * Ensure we have at least one empty slot available after rollover
+	 * even when the maximum number of VMIDs are pinned. VMID #0 is reserved.
+	 */
+	max_pinned_vmids = NUM_USER_VMIDS - num_possible_cpus() - 2;
+
 	return 0;
 }
 
 void __init kvm_arm_vmid_alloc_free(void)
 {
+	bitmap_free(pinned_vmid_map);
 	bitmap_free(vmid_map);
 }
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ccab57f36c62de2ce41ed3a07e6b87141b625f42..0215485560838fda3868b22093d4a9f1ece4cc36 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2220,6 +2220,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
 static struct iommu_domain *
 amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 			    struct iommu_domain *parent,
+			    struct kvm *kvm,
 			    const struct iommu_user_data *user_data)
 {
 
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 7fe05fea676a95d6373728e6f9ae5127d015707b..987ff896e802e0df09846575f6e76c740835b721 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -2612,9 +2613,13 @@ static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
 		xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);
 		mutex_unlock(&arm_smmu_asid_lock);
 	} else {
-		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
-		if (cfg->vmid)
-			ida_free(&smmu->vmid_map, cfg->vmid);
+		if (smmu_domain->iommufd_kvm) {
+			kvm_pinned_vmid_put(smmu_domain->iommufd_kvm);
+		} else {
+			struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
+			if (cfg->vmid)
+				ida_free(&smmu->vmid_map, cfg->vmid);
+		}
 	}
 
 	kfree(smmu_domain);
@@ -2642,9 +2647,13 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
 	int vmid;
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
 
-	/* Reserve VMID 0 for stage-2 bypass STEs */
-	vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
-			       GFP_KERNEL);
+	if (smmu_domain->iommufd_kvm) {
+		vmid = kvm_pinned_vmid_get(smmu_domain->iommufd_kvm);
+	} else {
+		/* Reserve VMID 0 for stage-2 bypass STEs */
+		vmid = ida_alloc_range(&smmu->vmid_map, 1,
+				       (1 << smmu->vmid_bits) - 1, GFP_KERNEL);
+	}
 	if (vmid < 0)
 		return vmid;
 
@@ -3361,6 +3370,7 @@ static struct iommu_domain arm_smmu_blocked_domain = {
 static struct iommu_domain *
 arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
 			   struct iommu_domain *parent,
+			   struct kvm *kvm,
 			   const struct iommu_user_data *user_data)
 {
 	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
@@ -3385,6 +3395,7 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
 		}
 		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
 		smmu_domain->nest_parent = true;
+		smmu_domain->iommufd_kvm = kvm;
 	}
 
 	smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
@@ -4643,11 +4654,14 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume)
 	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
 
 	/* CR2 (random crap) */
-	reg = CR2_PTM | CR2_RECINVSID;
+	reg = CR2_RECINVSID;
 
 	if (smmu->features & ARM_SMMU_FEAT_E2H)
 		reg |= CR2_E2H;
 
+	if (!(smmu->features & ARM_SMMU_FEAT_BTM))
+		reg |= CR2_PTM;
+
 	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
 
 	/* Stream table */
@@ -4982,6 +4996,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 {
 	u32 reg;
 	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
+	bool vhe = cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN);
 
 	/* IDR0 */
 	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -5034,7 +5049,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 
 	if (reg & IDR0_HYP) {
 		smmu->features |= ARM_SMMU_FEAT_HYP;
-		if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+		if (vhe)
 			smmu->features |= ARM_SMMU_FEAT_E2H;
 	}
 
@@ -5061,6 +5076,21 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 
 	if (reg & IDR0_S2P)
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
+	/*
+	 * If S1 is supported, check whether we can enable BTM. This means if S2
+	 * is available, we will use S2 for nested domains only with a KVM VMID.
+	 * BTM is useful when the CPU shares page tables with the SMMUv3 (e.g. vSVA).
+	 */
+	if (reg & IDR0_S1P) {
+		/*
+		 * If the CPU is using VHE, but the SMMU doesn't support it, the
+		 * SMMU will create TLB entries for NH-EL1 world and will miss
+		 * the broadcast TLB invalidations that target the EL2-E2H world.
+		 * Don't enable BTM in that case.
+		 */
+		if (reg & IDR0_BTM && (!vhe || reg & IDR0_HYP))
+			smmu->features |= ARM_SMMU_FEAT_BTM;
+	}
 
 	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
 		dev_err(smmu->dev, "no translation support!\n");
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 38d46098c668b6dcb0dde44b1b6152ddbee9e748..164afb978f82480af11fe366b860f7c6e1b18a61 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -883,6 +883,7 @@ struct arm_smmu_domain {
 	struct list_head node;
 	struct kvm *kvm;
 #endif
+	KABI_EXTEND(struct kvm *iommufd_kvm)
 };
 
 struct arm_smmu_nested_domain {
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 8169b4a3d7a05bea2fc794b736b4e95859f99180..0be8cb24b92561c1a42c66775cc16534964149ea 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4001,6 +4001,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 static struct iommu_domain *
 intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
 			      struct iommu_domain *parent,
+			      struct kvm *kvm,
 			      const struct iommu_user_data *user_data)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index c92e575cf01e96f38c6355767c7656371e058d63..5f75cbc255f3bc54396c4d5578e5c55f70a13945 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -140,7 +140,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 
 	if (ops->domain_alloc_user) {
 		hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
-						      user_data);
+						      ictx->kvm, user_data);
 		if (IS_ERR(hwpt->domain)) {
 			rc = PTR_ERR(hwpt->domain);
 			hwpt->domain = NULL;
@@ -241,7 +241,8 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
 
 	hwpt->domain = ops->domain_alloc_user(idev->dev,
 					      flags & ~IOMMU_HWPT_FAULT_ID_VALID,
-					      parent->common.domain, user_data);
+					      parent->common.domain,
+					      ictx->kvm, user_data);
 	if (IS_ERR(hwpt->domain)) {
 		rc = PTR_ERR(hwpt->domain);
 		hwpt->domain = NULL;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 85a2d4c1cd8384c2e49ebe39944163bd93620d67..7f1193f3b638c0222a12cdee8a7eb339fbdbd2e8 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -18,6 +18,7 @@ struct iommu_domain;
 struct iommu_group;
 struct iommu_option;
 struct iommufd_device;
+struct kvm;
 
 struct iommufd_ctx {
 	struct file *file;
@@ -29,6 +30,8 @@ struct iommufd_ctx {
 	/* Compatibility with VFIO no iommu */
 	u8 no_iommu_mode;
 	struct iommufd_ioas *vfio_ioas;
+	/* Associated KVM pointer */
+	KABI_EXTEND(struct kvm *kvm)
 };
 
 /*
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 3872abbd87293f61a264cf1e2fef0216e72b57fc..0be8d7279bf2ef527b4733f88ce9fa5bd0026366 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -478,6 +478,20 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
 }
 EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, IOMMUFD);
 
+/**
+ * iommufd_ctx_set_kvm - Set the KVM pointer for an iommufd context
+ * @ictx: Context to operate on
+ * @kvm: KVM pointer with a reference taken using kvm_get_kvm_safe()
+ */
+void iommufd_ctx_set_kvm(struct iommufd_ctx *ictx, struct kvm *kvm)
+{
+	xa_lock(&ictx->objects);
+	if (!ictx->kvm)
+		ictx->kvm = kvm;
+	xa_unlock(&ictx->objects);
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_ctx_set_kvm, IOMMUFD);
+
 static const struct iommufd_object_ops iommufd_object_ops[] = {
 	[IOMMUFD_OBJ_ACCESS] = {
 		.destroy = iommufd_access_destroy_object,
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 8e727bdca87792f39f60ebc4e7876c4c6cb845dd..34500385b713b860bdbee8e1e04a090d59684a93 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -381,6 +381,7 @@ mock_domain_alloc_nested(struct iommu_domain *parent, u32 flags,
 static struct iommu_domain *
 mock_domain_alloc_user(struct device *dev, u32 flags,
 		       struct iommu_domain *parent,
+		       struct kvm *kvm,
 		       const struct iommu_user_data *user_data)
 {
 	bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
diff --git a/drivers/vfio/device_cdev.c b/drivers/vfio/device_cdev.c
index e75da0a70d1f838a1d611e89fa6102876f2c5fba..e75e96fb57cbc852e9354f91496289a626bf3a28 100644
--- a/drivers/vfio/device_cdev.c
+++ b/drivers/vfio/device_cdev.c
@@ -101,6 +101,9 @@ long vfio_df_ioctl_bind_iommufd(struct vfio_device_file *df,
 	 */
 	vfio_df_get_kvm_safe(df);
 
+	if (df->kvm)
+		iommufd_ctx_set_kvm(df->iommufd, df->kvm);
+
 	ret = vfio_df_open(df);
 	if (ret)
 		goto out_put_kvm;
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index bde84ad344e50181685f5fbc2620c20b7b33f5a0..488b815db970e60e084dcefaeefdca6249f794db 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -85,6 +85,8 @@ struct vfio_group {
 	struct list_head	vfio_next;
 #if IS_ENABLED(CONFIG_VFIO_CONTAINER)
 	struct list_head	container_next;
+#else
+	KABI_DEPRECATE(struct list_head, container_next)
 #endif
 	enum vfio_group_type	type;
 	struct mutex		group_lock;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 83ec4bf9809eca4761f2226d9b054762a8d1d29e..73239dedac8089aeee399de401b5020049f88e7f 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -682,9 +682,12 @@ struct iommu_ops {
 
 	/* Domain allocation and freeing by the iommu driver */
 	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
-	struct iommu_domain *(*domain_alloc_user)(
-		struct device *dev, u32 flags, struct iommu_domain *parent,
-		const struct iommu_user_data *user_data);
+	KABI_REPLACE(struct iommu_domain *(*domain_alloc_user)(
+		struct device *dev, u32 flags, struct iommu_domain *parent,
+		const struct iommu_user_data *user_data),
+		struct iommu_domain *(*domain_alloc_user)(
+		struct device *dev, u32 flags, struct iommu_domain *parent,
+		struct kvm *kvm, const struct iommu_user_data *user_data))
 	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
 
 	struct iommu_device *(*probe_device)(struct device *dev);
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 11110c74920090a2a586b97a0140ac76e203bc0a..f3d921b8222f5968508c82f4a91b3c8c244f6bcb 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -22,6 +22,7 @@ struct iommufd_ctx;
 struct iommufd_device;
 struct iommufd_viommu_ops;
 struct page;
+struct kvm;
 
 enum iommufd_object_type {
 	IOMMUFD_OBJ_NONE,
@@ -128,6 +129,7 @@ struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
 struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
 void iommufd_ctx_put(struct iommufd_ctx *ictx);
 bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
+void iommufd_ctx_set_kvm(struct iommufd_ctx *ictx, struct kvm *kvm);
 
 int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
 			     unsigned long length, struct page **out_pages,
@@ -149,6 +151,11 @@ static inline void iommufd_ctx_put(struct iommufd_ctx *ictx)
 {
 }
 
+static inline void iommufd_ctx_set_kvm(struct iommufd_ctx *ictx,
+				       struct kvm *kvm)
+{
+}
+
 static inline int iommufd_access_pin_pages(struct iommufd_access *access,
 					   unsigned long iova,
 					   unsigned long length,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3968797272b94c81b5f2b1726fc0a3313bdc4f86..b1345bff7f7c4dd240f3ab461a4f84cc49acc8a1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2408,6 +2408,21 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
 }
 #endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
 
+#ifdef CONFIG_HAVE_KVM_PINNED_VMID
+int kvm_pinned_vmid_get(struct kvm *kvm);
+void kvm_pinned_vmid_put(struct kvm *kvm);
+int kvm_arch_pinned_vmid_get(struct kvm *kvm);
+void kvm_arch_pinned_vmid_put(struct kvm *kvm);
+#else
+static inline int kvm_pinned_vmid_get(struct kvm *kvm)
+{
+	return -EINVAL;
+}
+
+static inline void kvm_pinned_vmid_put(struct kvm *kvm)
+{
+}
+#endif
 /*
  * If more than one page is being (un)accounted, @virt must be the address of
  * the first page of a block of pages what were allocated together (i.e
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 484d0873061ca5041c546f1c44520a1848ec1d16..7e19e8ada12150983f2f8bc9be3811c92981f4fa 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -92,3 +92,6 @@ config HAVE_KVM_PM_NOTIFIER
 
 config KVM_GENERIC_HARDWARE_ENABLING
 	bool
+
+config HAVE_KVM_PINNED_VMID
+	bool
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 4b7378445812f2f02e2f4291a9a45ba7f1d20b8d..59f406068dd67c232ebd156a64e8657bc7da2a16 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3659,6 +3659,29 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
 
+#ifdef CONFIG_HAVE_KVM_PINNED_VMID
+int kvm_pinned_vmid_get(struct kvm *kvm)
+{
+	int ret;
+
+	if (!kvm_get_kvm_safe(kvm))
+		return -ENOENT;
+	ret = kvm_arch_pinned_vmid_get(kvm);
+	if (ret < 0)
+		kvm_put_kvm(kvm);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_pinned_vmid_get);
+
+void kvm_pinned_vmid_put(struct kvm *kvm)
+{
+	kvm_arch_pinned_vmid_put(kvm);
+	kvm_put_kvm(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_pinned_vmid_put);
+#endif
+
 #ifndef CONFIG_S390
 /*
  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
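
For context, the pieces above compose as follows: VFIO stashes the KVM pointer on the iommufd context at bind time (iommufd_ctx_set_kvm()), iommufd forwards it through the new struct kvm * argument of domain_alloc_user(), and a stage-2 capable driver can then pin the guest's VMID instead of allocating a private one, so CPU and SMMU TLB entries share the same VMID space and BTM broadcast invalidation works. The sketch below is illustrative only and is not part of the patch: the my_* names are hypothetical stand-ins for such a driver, while kvm_pinned_vmid_get()/kvm_pinned_vmid_put() are the interfaces introduced above.

/*
 * Illustrative sketch only (not part of the patch): how a stage-2 capable
 * IOMMU driver is expected to consume the pinned-VMID API added above.
 * The "my_*" names are hypothetical; the kvm pointer arrives via the new
 * domain_alloc_user() argument.
 */
#include <linux/iommu.h>
#include <linux/kvm_host.h>

/* Hypothetical driver-private fallback allocator (stand-in for ida_alloc_range()) */
static int my_private_vmid_alloc(void);
static void my_private_vmid_free(u16 vmid);

struct my_s2_domain {
	struct iommu_domain	domain;
	struct kvm		*kvm;	/* non-NULL when the VMID is pinned in KVM */
	u16			vmid;
};

static int my_domain_finalise_s2(struct my_s2_domain *sd, struct kvm *kvm)
{
	int vmid;

	if (kvm) {
		/*
		 * Reuse the guest's stage-2 VMID so CPU and SMMU TLB entries
		 * match; this also takes a reference on the VM that is only
		 * dropped by kvm_pinned_vmid_put().
		 */
		vmid = kvm_pinned_vmid_get(kvm);
		if (vmid < 0)
			return vmid;
		sd->kvm = kvm;
	} else {
		/* No KVM association: fall back to a driver-private VMID */
		vmid = my_private_vmid_alloc();
		if (vmid < 0)
			return vmid;
	}

	sd->vmid = vmid;
	return 0;
}

static void my_domain_free_s2(struct my_s2_domain *sd)
{
	if (sd->kvm)
		kvm_pinned_vmid_put(sd->kvm);	/* unpins the VMID, drops the kvm ref */
	else
		my_private_vmid_free(sd->vmid);
}

Keeping the fallback path means the driver still behaves as before when no VM has been bound to the iommufd context (ictx->kvm is NULL), which mirrors what the SMMUv3 changes above do.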