From dc2e509af58213e57140753664a34ba838cca793 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov
Date: Fri, 25 Mar 2022 14:21:40 +0100
Subject: [PATCH 1/4] KVM: x86: Forbid VMM to set SYNIC/STIMER MSRs when SynIC
 wasn't activated

stable inclusion
from stable-5.10.110
commit 09c771c45c1243e295470225aaee726693fdc242
category: bugfix
issue: I5AFE6
CVE: CVE-2022-2153

Signed-off-by: lizongfeng

---------------------------------------

KVM: x86: Forbid VMM to set SYNIC/STIMER MSRs when SynIC wasn't activated

commit b1e34d325397a33d97d845e312d7cf2a8b646b44 upstream.

Setting non-zero values to SYNIC/STIMER MSRs activates certain features,
this should not happen when KVM_CAP_HYPERV_SYNIC{,2} was not activated.

Note, it would've been better to forbid writing anything to SYNIC/STIMER
MSRs, including zeroes, however, at least QEMU tries clearing
HV_X64_MSR_STIMER0_CONFIG without SynIC. HV_X64_MSR_EOM MSR is somewhat
'special' as writing zero there triggers an action, this also should not
happen when SynIC wasn't activated.

Signed-off-by: Vitaly Kuznetsov
Message-Id: <20220325132140.25650-4-vkuznets@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/kvm/hyperv.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index bb39f493447c..2673a34a2392 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -207,7 +207,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
 	int ret;
 
-	if (!synic->active && !host)
+	if (!synic->active && (!host || data))
 		return 1;
 
 	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -253,6 +253,9 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 	case HV_X64_MSR_EOM: {
 		int i;
 
+		if (!synic->active)
+			break;
+
 		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
 			kvm_hv_notify_acked_sint(vcpu, i);
 		break;
@@ -636,7 +639,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
 	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
 
-	if (!synic->active && !host)
+	if (!synic->active && (!host || config))
 		return 1;
 
 	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
@@ -660,7 +663,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
 	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
 
-	if (!synic->active && !host)
+	if (!synic->active && (!host || count))
 		return 1;
 
 	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
-- 
Gitee

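The gate added above, (!synic->active && (!host || data)), reduces to a small truth table: guest writes are rejected whenever SynIC is inactive, host writes of zero stay allowed (so QEMU can still clear HV_X64_MSR_STIMER0_CONFIG), and non-zero host writes are rejected as well. The standalone C model below, with illustrative names rather than the kernel code, makes the accepted/rejected cases explicit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the gate: returns true when the MSR write must be rejected.
 * 'active' mirrors synic->active, 'host' marks a host-initiated access,
 * 'data' is the value being written. */
static bool synic_msr_write_rejected(bool active, bool host, uint64_t data)
{
        return !active && (!host || data);
}

int main(void)
{
        /* Guest write while SynIC is off: rejected, even for zero. */
        printf("%d\n", synic_msr_write_rejected(false, false, 0));
        /* Host clearing an MSR (QEMU zeroing STIMER0_CONFIG): allowed. */
        printf("%d\n", synic_msr_write_rejected(false, true, 0));
        /* Host writing a non-zero value while SynIC is off: rejected. */
        printf("%d\n", synic_msr_write_rejected(false, true, 0x10001));
        /* SynIC active: this gate never rejects. */
        printf("%d\n", synic_msr_write_rejected(true, false, 0x10001));
        return 0;
}

The same predicate guards synic_set_msr(), stimer_set_config() and stimer_set_count(); only the name of the value argument differs (data, config, count), and HV_X64_MSR_EOM additionally gets an explicit !synic->active check because writing zero there has a side effect.
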
From 4c658e431a08ed71c084ee08fdc93bd1b607d8e5 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf
Date: Mon, 23 May 2022 09:11:49 -0700
Subject: [PATCH 2/4] x86/speculation/mmio: Print SMT warning

stable inclusion
from stable-5.10.123
commit aa238a92cc94a15812c0de4adade86ba8f22707a
category: bugfix
issue: I5AFE6
CVE: CVE-2022-21125

Signed-off-by: lizongfeng

---------------------------------------

x86/speculation/mmio: Print SMT warning

commit 1dc6ff02c8bf77d71b9b5d11cbc9df77cfb28626 upstream

Similar to MDS and TAA, print a warning if SMT is enabled for the MMIO
Stale Data vulnerability.

Signed-off-by: Josh Poimboeuf
Signed-off-by: Thomas Gleixner
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/kernel/cpu/bugs.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 78b9514a3844..de39f73e0051 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1091,6 +1091,7 @@ static void update_mds_branch_idle(void)
 
 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
 
 void cpu_bugs_smt_update(void)
 {
@@ -1135,6 +1136,16 @@ void cpu_bugs_smt_update(void)
 		break;
 	}
 
+	switch (mmio_mitigation) {
+	case MMIO_MITIGATION_VERW:
+	case MMIO_MITIGATION_UCODE_NEEDED:
+		if (sched_smt_active())
+			pr_warn_once(MMIO_MSG_SMT);
+		break;
+	case MMIO_MITIGATION_OFF:
+		break;
+	}
+
 	mutex_unlock(&spec_ctrl_mutex);
 }
 
-- 
Gitee

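The warning follows the pattern already used for MDS and TAA: warn once when the mitigation is active (or microcode is still needed) and SMT is on, stay silent when the mitigation is explicitly off. A minimal userspace model of that decision, using stand-in names and a stubbed sched_smt_active(), might look like this:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's enum and helpers; names are illustrative. */
enum mmio_mitigations {
        MMIO_MITIGATION_OFF,
        MMIO_MITIGATION_UCODE_NEEDED,
        MMIO_MITIGATION_VERW,
};

static bool sched_smt_active(void)
{
        return true;    /* pretend SMT is enabled */
}

static void mmio_smt_check(enum mmio_mitigations mmio_mitigation)
{
        switch (mmio_mitigation) {
        case MMIO_MITIGATION_VERW:
        case MMIO_MITIGATION_UCODE_NEEDED:
                if (sched_smt_active())
                        puts("MMIO Stale Data CPU bug present and SMT on, data leak possible.");
                break;
        case MMIO_MITIGATION_OFF:
                break;
        }
}

int main(void)
{
        mmio_smt_check(MMIO_MITIGATION_VERW);           /* warns */
        mmio_smt_check(MMIO_MITIGATION_UCODE_NEEDED);   /* warns */
        mmio_smt_check(MMIO_MITIGATION_OFF);            /* silent */
        return 0;
}

In the kernel this runs from cpu_bugs_smt_update() under spec_ctrl_mutex and uses pr_warn_once(), so the message is emitted at most once per boot regardless of how often SMT state changes.
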
From 85fbb9218ffff7babfaf753317a30775f475a71a Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Tue, 9 Feb 2021 11:20:27 -0800
Subject: [PATCH 3/4] tcp: change source port randomizarion at connect() time

stable inclusion
from stable-5.10.119
commit 33f1b4a27abced7ae0f740d2ec3040debf7c4b3c
category: bugfix
issue: I5AFE6
CVE: CVE-2022-32296

Signed-off-by: lizongfeng

---------------------------------------

tcp: change source port randomizarion at connect() time

commit 190cc82489f46f9d88e73c81a47e14f80a791e1a upstream.

RFC 6056 (Recommendations for Transport-Protocol Port Randomization)
provides good summary of why source selection needs extra care.

David Dworken reminded us that linux implements Algorithm 3
as described in RFC 6056 3.3.3

Quoting David :

  In the context of the web, this creates an interesting info leak where
  websites can count how many TCP connections a user's computer is
  establishing over time. For example, this allows a website to count
  exactly how many subresources a third party website loaded.

  This also allows:
  - Distinguishing between different users behind a VPN based on
    distinct source port ranges.
  - Tracking users over time across multiple networks.
  - Covert communication channels between different browsers/browser
    profiles running on the same computer
  - Tracking what applications are running on a computer based on
    the pattern of how fast source ports are getting incremented.

Section 3.3.4 describes an enhancement, that reduces
attackers ability to use the basic information currently
stored into the shared 'u32 hint'.

This change also decreases collision rate when
multiple applications need to connect() to
different destinations.

Signed-off-by: Eric Dumazet
Reported-by: David Dworken
Cc: Willem de Bruijn
Signed-off-by: David S. Miller
Signed-off-by: Stefan Ghinea
Signed-off-by: Greg Kroah-Hartman
---
 net/ipv4/inet_hashtables.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 91fbfc8cada8..36b3dad5fe2f 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -711,6 +711,17 @@ void inet_unhash(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(inet_unhash);
 
+/* RFC 6056 3.3.4.  Algorithm 4: Double-Hash Port Selection Algorithm
+ * Note that we use 32bit integers (vs RFC 'short integers')
+ * because 2^16 is not a multiple of num_ephemeral and this
+ * property might be used by clever attacker.
+ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
+ * we use 256 instead to really give more isolation and
+ * privacy, this only consumes 1 KB of kernel memory.
+ */
+#define INET_TABLE_PERTURB_SHIFT 8
+static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
+
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		struct sock *sk, u64 port_offset,
 		int (*check_established)(struct inet_timewait_death_row *,
@@ -724,8 +735,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	struct inet_bind_bucket *tb;
 	u32 remaining, offset;
 	int ret, i, low, high;
-	static u32 hint;
 	int l3mdev;
+	u32 index;
 
 	if (port) {
 		head = &hinfo->bhash[inet_bhashfn(net, port,
@@ -752,7 +763,10 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	if (likely(remaining > 1))
 		remaining &= ~1U;
 
-	offset = hint + port_offset;
+	net_get_random_once(table_perturb, sizeof(table_perturb));
+	index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+
+	offset = READ_ONCE(table_perturb[index]) + port_offset;
 	offset %= remaining;
 	/* In first pass we try ports of @low parity.
 	 * inet_csk_get_port() does the opposite choice.
@@ -807,7 +821,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	return -EADDRNOTAVAIL;
 
 ok:
-	hint += i + 2;
+	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
 	/* Head lock still held and bh's disabled */
 	inet_bind_hash(sk, tb, port);
-- 
Gitee

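What the patch implements is the RFC 6056 3.3.4 shape (Algorithm 4, double-hash port selection): the starting offset of the ephemeral-port search is a per-destination hash plus a perturbation read from a small table indexed by a second hash of the same value, and the visited slot is bumped once a port is found, so different destinations no longer share a single global 'u32 hint'. The following is a self-contained userspace approximation of that shape (toy unkeyed hash, fixed port range, no bind-conflict checking), not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

#define TABLE_PERTURB_SHIFT 8                   /* 2^8 buckets, as in this backport */
#define TABLE_PERTURB_SIZE  (1u << TABLE_PERTURB_SHIFT)

static uint32_t table_perturb[TABLE_PERTURB_SIZE];

/* Toy, unkeyed stand-in for the keyed hash the kernel derives from the
 * destination and a boot-time secret. */
static uint32_t toy_port_offset(uint32_t daddr, uint16_t dport)
{
        uint32_t h = daddr * 2654435761u ^ (uint32_t)dport * 40503u;
        return h ^ (h >> 16);
}

/* offset = F(dest) + table[G(dest)], then walk the ephemeral range from
 * there and bump the visited bucket afterwards. */
static uint16_t pick_source_port(uint32_t daddr, uint16_t dport)
{
        const uint32_t low = 32768, high = 60999;       /* typical ip_local_port_range */
        uint32_t remaining = high - low + 1;
        uint32_t port_offset = toy_port_offset(daddr, dport);
        uint32_t index = port_offset & (TABLE_PERTURB_SIZE - 1);
        uint32_t offset = (table_perturb[index] + port_offset) % remaining;

        /* The kernel walks forward from this offset until a usable port is
         * found; assume the first candidate is free in this sketch, and
         * advance the bucket so the next connect() hashing here starts
         * somewhere else. */
        table_perturb[index] += 2;
        return (uint16_t)(low + offset);
}

int main(void)
{
        /* Repeated connects to one destination walk one bucket; a different
         * destination uses an unrelated bucket. */
        printf("%u\n", (unsigned)pick_source_port(0x0a000001, 443));
        printf("%u\n", (unsigned)pick_source_port(0x0a000001, 443));
        printf("%u\n", (unsigned)pick_source_port(0x0a000002, 443));
        return 0;
}

In the kernel, port_offset is a keyed hash of the destination computed by the caller, the bucket index is hash_32(port_offset, INET_TABLE_PERTURB_SHIFT), the table is seeded once with net_get_random_once(), and the bucket is advanced by i + 2 to account for how far the parity-preserving walk actually went, as visible in the hunks above.
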
From 6de575a2892d1f610d0629afb7dedc5836c75fb1 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Mon, 2 May 2022 10:46:13 +0200
Subject: [PATCH 4/4] tcp: increase source port perturb table to 2^16

stable inclusion
from stable-5.10.125
commit 9429b75bc271b6f29e50dbb0ee0751800ff87dd9
category: bugfix
issue: I5AFE6
CVE: CVE-2022-32296

Signed-off-by: lizongfeng

---------------------------------------

tcp: increase source port perturb table to 2^16

commit 4c2c8f03a5ab7cb04ec64724d7d176d00bcc91e5 upstream.

Moshe Kol, Amit Klein, and Yossi Gilad reported being able to accurately
identify a client by forcing it to emit only 40 times more connections
than there are entries in the table_perturb[] table. The previous two
improvements consisting in resalting the secret every 10s and adding
randomness to each port selection only slightly improved the situation,
and the current value of 2^8 was too small as it's not very difficult
to make a client emit 10k connections in less than 10 seconds.

Thus we're increasing the perturb table from 2^8 to 2^16 so that the
same precision now requires 2.6M connections, which is more difficult
in this time frame and harder to hide as a background activity. The
impact is that the table now uses 256 kB instead of 1 kB, which could
mostly affect devices making frequent outgoing connections. However
such components usually target a small set of destinations (load
balancers, database clients, perf assessment tools), and in practice
only a few entries will be visited, like before. A live test at 1
million connections per second showed no performance difference from
the previous value.

Reported-by: Moshe Kol
Reported-by: Yossi Gilad
Reported-by: Amit Klein
Reviewed-by: Eric Dumazet
Signed-off-by: Willy Tarreau
Signed-off-by: Jakub Kicinski
Cc: Ben Hutchings
Signed-off-by: Greg Kroah-Hartman
---
 net/ipv4/inet_hashtables.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 36b3dad5fe2f..7aa86a4409fc 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -715,11 +715,12 @@ EXPORT_SYMBOL_GPL(inet_unhash);
  * Note that we use 32bit integers (vs RFC 'short integers')
  * because 2^16 is not a multiple of num_ephemeral and this
  * property might be used by clever attacker.
- * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
- * we use 256 instead to really give more isolation and
- * privacy, this only consumes 1 KB of kernel memory.
+ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
+ * attacks were since demonstrated, thus we use 65536 instead to really
+ * give more isolation and privacy, at the expense of 256kB of kernel
+ * memory.
  */
-#define INET_TABLE_PERTURB_SHIFT 8
+#define INET_TABLE_PERTURB_SHIFT 16
 static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
 
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-- 
Gitee
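
A quick back-of-the-envelope check of the figures in the commit message: the table stores one u32 per slot, so 2^8 slots cost 1 kB and 2^16 slots cost 256 kB, and since the reported attack needs roughly 40 connections per table entry, 2^8 entries correspond to about 10k connections while 2^16 entries push the requirement to about 2.6M. A throwaway program confirming the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned int shifts[] = { 8, 16 };        /* before / after this patch */

        for (unsigned int i = 0; i < 2; i++) {
                uint64_t entries = 1ull << shifts[i];
                uint64_t bytes   = entries * sizeof(uint32_t);
                uint64_t conns   = entries * 40;        /* ~40 connections per entry */

                printf("shift %2u: %6llu entries, %4llu kB, ~%llu connections\n",
                       shifts[i],
                       (unsigned long long)entries,
                       (unsigned long long)(bytes / 1024),
                       (unsigned long long)conns);
        }
        return 0;
}

This prints 256 entries / 1 kB / ~10240 connections for shift 8 and 65536 entries / 256 kB / ~2621440 connections for shift 16, matching the "10k" and "2.6M" figures quoted above.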