From 332f991ccf00340d7cec7e581a59645f52344521 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 18 May 2022 18:16:57 +0800
Subject: [PATCH 1/2] io_uring: fix race between timeout flush and removal

commit: b94073254dd632c9a1069cd6320e305441127325
CVE: CVE-2022-29582

Signed-off-by: wanxiaoqing
-----------------------------------------------------------

stable inclusion
from stable-v5.10.110
commit 2827328e646d0c2d3db1bfcad4b5f5016ce0d643
category: bugfix
bugzilla: 186670, https://gitee.com/src-openeuler/kernel/issues/I54H78
CVE: CVE-2022-29582

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-4.19.y&id=2827328e646d0c2d3db1bfcad4b5f5016ce0d643

--------------------------------

commit e677edbcabee849bfdd43f1602bccbecf736a646 upstream.

io_flush_timeouts() assumes the timeout isn't in progress of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.

Leave it on the list and let the normal timeout cancelation take care
of it.

Cc: stable@vger.kernel.org # 5.5+
Signed-off-by: Jens Axboe
Signed-off-by: Greg Kroah-Hartman
Conflicts:
	fs/io_uring.c
Signed-off-by: Guo Xuenan
Reviewed-by: Zhang Yi
Signed-off-by: Zheng Zengkai
Signed-off-by: wanxiaoqing
---
 fs/io_uring.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 545b1d8f15db..4f532ce5d0b4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1553,6 +1553,7 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
+	struct io_kiocb *req, *tmp;
 	u32 seq;
 
 	if (list_empty(&ctx->timeout_list))
@@ -1560,10 +1561,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
-	do {
+	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
 		u32 events_needed, events_got;
-		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
@@ -1580,9 +1579,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 		if (events_got < events_needed)
 			break;
 
-		list_del_init(&req->timeout.list);
 		io_kill_timeout(req, 0);
-	} while (!list_empty(&ctx->timeout_list));
+	}
 
 	ctx->cq_last_tm_flush = seq;
 }
@@ -5586,6 +5584,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	else
 		data->mode = HRTIMER_MODE_REL;
 
+	INIT_LIST_HEAD(&req->timeout.list);
 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
 	return 0;
 }
@@ -6234,6 +6233,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
 		prev = NULL;
 	}
 
+	list_del(&req->timeout.list);
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
 	if (prev) {
-- 
Gitee

From 885d4e3cc49eb94c973c3c8448d366d7b012bb4c Mon Sep 17 00:00:00 2001
From: Mingwei Zhang
Date: Tue, 7 Jun 2022 17:43:48 +0800
Subject: [PATCH 2/2] KVM: SEV: add cache flush to solve SEV cache incoherency
 issues

commit: 5e0a056024247f076ad9c18bf2e61e945f64f4e1
CVE: CVE-2022-0171

Signed-off-by: wanxiaoqing
-----------------------------------------------------------

mainline inclusion
from mainline-v5.18-rc4
commit 683412ccf61294d727ead4a73d97397396e69a6b
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I58DFI
CVE: CVE-2022-0171

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=683412ccf61294d727ead4a73d97397396e69a6b

--------------------------------

Flush the CPU caches when memory is reclaimed from an SEV guest (where
reclaim also includes it being unmapped from KVM's memslots). Due to
lack of coherency for SEV encrypted memory, failure to flush results in
silent data corruption if userspace is malicious/broken and doesn't
ensure SEV guest memory is properly pinned and unpinned.

Cache coherency is not enforced across the VM boundary in SEV (AMD APM
vol.2 Section 15.34.7). Confidential cachelines, generated by
confidential VM guests, have to be explicitly flushed on the host side.
If a memory page containing dirty confidential cachelines is released
by the VM and reallocated to another user, the cachelines may corrupt
the new user at a later time.

KVM takes a shortcut by assuming all confidential memory remains pinned
until the end of the VM lifetime. Therefore, KVM does not flush the
cache at mmu_notifier invalidation events. Because of this incorrect
assumption and the lack of cache flushing, malicious userspace can
crash the host kernel by creating a malicious VM and continuously
allocating/releasing unpinned confidential memory pages while the VM is
running.

Add cache flush operations to mmu_notifier operations to ensure that
any physical memory leaving the guest VM gets flushed. In particular,
hook mmu_notifier_invalidate_range_start and mmu_notifier_release
events and flush the cache accordingly. The hooks run after releasing
the mmu lock to avoid contention with other vCPUs.

Cc: stable@vger.kernel.org
Suggested-by: Sean Christpherson
Reported-by: Mingwei Zhang
Signed-off-by: Mingwei Zhang
Message-Id: <20220421031407.2516575-4-mizhang@google.com>
Signed-off-by: Paolo Bonzini

conflicts:
	arch/x86/include/asm/kvm-x86-ops.h
	arch/x86/kvm/x86.c
	virt/kvm/kvm_main.c

Signed-off-by: Chen Jiahao
Reviewed-by: Liao Chang
Reviewed-by: Zhang Jianhua
Signed-off-by: Zheng Zengkai
Signed-off-by: wanxiaoqing
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/svm/sev.c          |  8 ++++++++
 arch/x86/kvm/svm/svm.c          |  1 +
 arch/x86/kvm/svm/svm.h          |  2 ++
 arch/x86/kvm/x86.c              |  8 ++++++++
 include/linux/kvm_host.h        |  2 ++
 virt/kvm/kvm_main.c             | 16 ++++++++++++++--
 7 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b1cd8334db11..6ef0074e5306 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1272,6 +1272,7 @@ struct kvm_x86_ops {
 	int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
 	int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
 	int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+	void (*guest_memory_reclaimed)(struct kvm *kvm);
 
 	int (*get_msr_feature)(struct kvm_msr_entry *entry);
 
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 01547bdbfb06..783ef0eb2b49 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1177,6 +1177,14 @@ void sev_hardware_teardown(void)
 	sev_flush_asids();
 }
 
+void sev_guest_memory_reclaimed(struct kvm *kvm)
+{
+	if (!sev_guest(kvm))
+		return;
+
+	wbinvd_on_all_cpus();
+}
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu)
 {
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1c9226cd6cde..cc9f950c206d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4315,6 +4315,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.mem_enc_op = svm_mem_enc_op,
 	.mem_enc_reg_region = svm_register_enc_region,
 	.mem_enc_unreg_region = svm_unregister_enc_region,
+	.guest_memory_reclaimed = sev_guest_memory_reclaimed,
 
 	.can_emulate_instruction = svm_can_emulate_instruction,
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be74e22b82ea..76e3b95343be 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -491,6 +491,8 @@ int svm_register_enc_region(struct kvm *kvm,
 			    struct kvm_enc_region *range);
 int svm_unregister_enc_region(struct kvm *kvm,
 			      struct kvm_enc_region *range);
+void sev_guest_memory_reclaimed(struct kvm *kvm);
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 int __init sev_hardware_setup(void);
 void sev_hardware_teardown(void);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ef02d23a8114..5243e00871c8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8819,6 +8819,14 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
 }
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+	if (!kvm_x86_ops.guest_memory_reclaimed)
+		return;
+
+	kvm_x86_ops.guest_memory_reclaimed(kvm);
+}
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	if (!lapic_in_kernel(vcpu))
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c66c702a4f07..4d3d918f0049 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1463,6 +1463,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 					    unsigned long start, unsigned long end);
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
 #else
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2809a69418a6..7b2d4318ccb6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -157,6 +157,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
 	/*
@@ -333,6 +337,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
 
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+	kvm_arch_flush_shadow_all(kvm);
+	kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 void kvm_reload_remote_mmus(struct kvm *kvm)
 {
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
@@ -487,6 +497,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+	kvm_arch_guest_memory_reclaimed(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 
 	return 0;
@@ -590,7 +601,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
 	int idx;
 
 	idx = srcu_read_lock(&kvm->srcu);
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -870,7 +881,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
 #else
-	kvm_arch_flush_shadow_all(kvm);
+	kvm_flush_shadow_all(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
@@ -1211,6 +1222,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 		 *	- kvm_is_visible_gfn (mmu_check_root)
 		 */
 		kvm_arch_flush_shadow_memslot(kvm, slot);
+		kvm_arch_guest_memory_reclaimed(kvm);
 	}
 
 	r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
-- 
Gitee
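
For readers applying the hunks mentally, io_flush_timeouts() reads roughly as
follows once the first patch is in place. This is assembled from the hunks
above; context the diff does not show is elided or marked as assumed, so treat
it as a sketch rather than a verbatim copy of fs/io_uring.c.

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;
	u32 seq;

	if (list_empty(&ctx->timeout_list))
		return;			/* context not shown in the diff; assumed */

	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	/* The old do/while popped each entry with list_first_entry() and
	 * list_del_init(); the fix walks the list and leaves entries on it,
	 * so normal timeout cancelation can still find them. */
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/* ... sequence comparison elided (unchanged context) ... */

		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}

	ctx->cq_last_tm_flush = seq;
}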
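
The second patch is spread across four files; collected in one place from the
hunks above, the resulting reclaim path looks roughly like this (surrounding
code elided):

/* virt/kvm/kvm_main.c: shadow teardown now also reports reclaimed guest
 * memory to the architecture. */
static void kvm_flush_shadow_all(struct kvm *kvm)
{
	kvm_arch_flush_shadow_all(kvm);
	kvm_arch_guest_memory_reclaimed(kvm);
}

/* arch/x86/kvm/x86.c: forward to the vendor callback when one is set;
 * the __weak stub in kvm_main.c keeps other architectures unaffected. */
void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
	if (!kvm_x86_ops.guest_memory_reclaimed)
		return;

	kvm_x86_ops.guest_memory_reclaimed(kvm);
}

/* arch/x86/kvm/svm/sev.c: SEV guests need a full write-back and invalidate,
 * since cache coherency is not enforced across the VM boundary. */
void sev_guest_memory_reclaimed(struct kvm *kvm)
{
	if (!sev_guest(kvm))
		return;

	wbinvd_on_all_cpus();
}

kvm_mmu_notifier_invalidate_range_start() calls
kvm_arch_guest_memory_reclaimed() right after dropping the mmu lock,
kvm_set_memslot() calls it after flushing the shadow memslot, and the
release and destroy paths go through kvm_flush_shadow_all().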