From 6fe893111ccb50106efc12c5aeb4f17100d0004e Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Tue, 29 Aug 2023 22:53:52 +0200
Subject: [PATCH 1/7] bpf: Annotate bpf_long_memcpy with data_race

mainline inclusion
from mainline-v6.6-rc1
commit 6a86b5b5cd76d2734304a0173f5f01aa8aa2025e
category: bugfix
issue: NA
CVE: CVE-2023-52521

Signed-off-by: yaowenrui

---------------------------------------

syzbot reported a data race splat between two processes trying to
update the same BPF map value via syscall on different CPUs:

  BUG: KCSAN: data-race in bpf_percpu_array_update / bpf_percpu_array_update

  write to 0xffffe8fffe7425d8 of 8 bytes by task 8257 on cpu 1:
   bpf_long_memcpy include/linux/bpf.h:428 [inline]
   bpf_obj_memcpy include/linux/bpf.h:441 [inline]
   copy_map_value_long include/linux/bpf.h:464 [inline]
   bpf_percpu_array_update+0x3bb/0x500 kernel/bpf/arraymap.c:380
   bpf_map_update_value+0x190/0x370 kernel/bpf/syscall.c:175
   generic_map_update_batch+0x3ae/0x4f0 kernel/bpf/syscall.c:1749
   bpf_map_do_batch+0x2df/0x3d0 kernel/bpf/syscall.c:4648
   __sys_bpf+0x28a/0x780
   __do_sys_bpf kernel/bpf/syscall.c:5241 [inline]
   __se_sys_bpf kernel/bpf/syscall.c:5239 [inline]
   __x64_sys_bpf+0x43/0x50 kernel/bpf/syscall.c:5239
   do_syscall_x64 arch/x86/entry/common.c:50 [inline]
   do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
   entry_SYSCALL_64_after_hwframe+0x63/0xcd

  write to 0xffffe8fffe7425d8 of 8 bytes by task 8268 on cpu 0:
   bpf_long_memcpy include/linux/bpf.h:428 [inline]
   bpf_obj_memcpy include/linux/bpf.h:441 [inline]
   copy_map_value_long include/linux/bpf.h:464 [inline]
   bpf_percpu_array_update+0x3bb/0x500 kernel/bpf/arraymap.c:380
   bpf_map_update_value+0x190/0x370 kernel/bpf/syscall.c:175
   generic_map_update_batch+0x3ae/0x4f0 kernel/bpf/syscall.c:1749
   bpf_map_do_batch+0x2df/0x3d0 kernel/bpf/syscall.c:4648
   __sys_bpf+0x28a/0x780
   __do_sys_bpf kernel/bpf/syscall.c:5241 [inline]
   __se_sys_bpf kernel/bpf/syscall.c:5239 [inline]
   __x64_sys_bpf+0x43/0x50 kernel/bpf/syscall.c:5239
   do_syscall_x64 arch/x86/entry/common.c:50 [inline]
   do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80
   entry_SYSCALL_64_after_hwframe+0x63/0xcd

  value changed: 0x0000000000000000 -> 0xfffffff000002788

The bpf_long_memcpy is used with 8-byte aligned pointers, power-of-8 size
and forced to use long read/writes to try to atomically copy long counters.
It is best-effort only and no barriers are here since it _will_ race with
concurrent updates from BPF programs. The bpf_long_memcpy() is called from
bpf(2) syscall. Marco suggested that the best way to make this known to
KCSAN would be to use data_race() annotation.
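For reference, a minimal user-space sketch of the annotated copy follows;
the data_race() stand-in below is an assumption for illustration only,
whereas the kernel macro in include/linux/compiler.h additionally tells
KCSAN that the racy access is intentional:

  /* Illustrative sketch, not kernel code: the stand-in macro only documents
   * that this best-effort, long-granular copy is allowed to race. */
  #include <stdint.h>

  #define data_race(expr) (expr)	/* kernel version also silences KCSAN */

  void long_memcpy_sketch(void *dst, const void *src, uint32_t size)
  {
  	const long *lsrc = src;
  	long *ldst = dst;

  	/* callers pass long-aligned buffers, size is a multiple of sizeof(long) */
  	size /= sizeof(long);
  	while (size--)
  		data_race(*ldst++ = *lsrc++);
  }
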
Reported-by: syzbot+97522333291430dd277f@syzkaller.appspotmail.com
Suggested-by: Marco Elver
Signed-off-by: Daniel Borkmann
Acked-by: Marco Elver
Link: https://lore.kernel.org/bpf/000000000000d87a7f06040c970c@google.com
Link: https://lore.kernel.org/bpf/57628f7a15e20d502247c3b55fceb1cb2b31f266.1693342186.git.daniel@iogearbox.net
Signed-off-by: wanxiaoqing
---
 include/linux/bpf.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a8b2625e54a4..9018da9132f0 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1480,7 +1480,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 
 	size /= sizeof(long);
 	while (size--)
-		*ldst++ = *lsrc++;
+		data_race(*ldst++ = *lsrc++);
 }
 
 /* verify correctness of eBPF program */
-- 
Gitee

From 94251c46eb4ececc2e993427fd1ef8d964f3cb95 Mon Sep 17 00:00:00 2001
From: Johannes Berg
Date: Tue, 19 Sep 2023 08:34:15 +0200
Subject: [PATCH 2/7] wifi: mac80211: fix potential key use-after-free

mainline inclusion
from mainline-v6.6-rc5
commit 31db78a4923ef5e2008f2eed321811ca79e7f71b
category: bugfix
issue: NA
CVE: CVE-2023-52530

Signed-off-by: yaowenrui

---------------------------------------

When ieee80211_key_link() is called by ieee80211_gtk_rekey_add() but
returns 0 due to KRACK protection (identical key reinstall),
ieee80211_gtk_rekey_add() will still return a pointer into the key,
in a potential use-after-free. This normally doesn't happen since
it's only called by iwlwifi in case of WoWLAN rekey offload which has
its own KRACK protection, but still better to fix, do that by
returning an error code and converting that to success on the
cfg80211 boundary only, leaving the error for bad callers of
ieee80211_gtk_rekey_add().

Reported-by: Dan Carpenter
Fixes: fdf7cb4185b6 ("mac80211: accept key reinstall without changing anything")
Signed-off-by: Johannes Berg
Signed-off-by: wanxiaoqing
---
 net/mac80211/cfg.c | 3 +++
 net/mac80211/key.c | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d46ed4cbe771..fd0b47de80af 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -509,6 +509,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 			sta->cipher_scheme = cs;
 
 	err = ieee80211_key_link(key, sdata, sta);
+	/* KRACK protection, shouldn't happen but just silently accept key */
+	if (err == -EALREADY)
+		err = 0;
 
  out_unlock:
 	mutex_unlock(&local->sta_mtx);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 6a72c33679ba..6b089594a9f3 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -843,7 +843,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
 		 */
 		if (ieee80211_key_identical(sdata, old_key, key)) {
 			ieee80211_key_free_unused(key);
-			ret = 0;
+			ret = -EALREADY;
 			goto out;
 		}
 
-- 
Gitee

From ea5bc22ef62f1a665cb44b959cdd69c98597cf70 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20Ca=C3=B1uelo?=
Date: Wed, 30 Aug 2023 12:04:18 +0200
Subject: [PATCH 3/7] usb: hub: Guard against accesses to uninitialized BOS descriptors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

stable inclusion
from stable-v5.10.199
commit 241f230324337ed5eae3846a554fb6d15169872c
category: bugfix
issue: NA
CVE: CVE-2023-52477

Signed-off-by: yaowenrui

---------------------------------------

commit f74a7afc224acd5e922c7a2e52244d891bbe44ee upstream.
Many functions in drivers/usb/core/hub.c and drivers/usb/core/hub.h
access fields inside udev->bos without checking if it was allocated and
initialized. If usb_get_bos_descriptor() fails for whatever reason,
udev->bos will be NULL and those accesses will result in a crash:

BUG: kernel NULL pointer dereference, address: 0000000000000018
PGD 0 P4D 0
Oops: 0000 [#1] PREEMPT SMP NOPTI
CPU: 5 PID: 17818 Comm: kworker/5:1 Tainted: G W 5.15.108-18910-gab0e1cb584e1 #1
Hardware name: Google Kindred/Kindred, BIOS Google_Kindred.12672.413.0 02/03/2021
Workqueue: usb_hub_wq hub_event
RIP: 0010:hub_port_reset+0x193/0x788
Code: 89 f7 e8 20 f7 15 00 48 8b 43 08 80 b8 96 03 00 00 03 75 36 0f b7 88 92 03 00 00 81 f9 10 03 00 00 72 27 48 8b 80 a8 03 00 00 <48> 83 78 18 00 74 19 48 89 df 48 8b 75 b0 ba 02 00 00 00 4c 89 e9
RSP: 0018:ffffab740c53fcf8 EFLAGS: 00010246
RAX: 0000000000000000 RBX: ffffa1bc5f678000 RCX: 0000000000000310
RDX: fffffffffffffdff RSI: 0000000000000286 RDI: ffffa1be9655b840
RBP: ffffab740c53fd70 R08: 00001b7d5edaa20c R09: ffffffffb005e060
R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
R13: ffffab740c53fd3e R14: 0000000000000032 R15: 0000000000000000
FS:  0000000000000000(0000) GS:ffffa1be96540000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 0000000000000018 CR3: 000000022e80c005 CR4: 00000000003706e0
Call Trace:
 hub_event+0x73f/0x156e
 ? hub_activate+0x5b7/0x68f
 process_one_work+0x1a2/0x487
 worker_thread+0x11a/0x288
 kthread+0x13a/0x152
 ? process_one_work+0x487/0x487
 ? kthread_associate_blkcg+0x70/0x70
 ret_from_fork+0x1f/0x30

Fall back to a default behavior if the BOS descriptor isn't accessible
and skip all the functionalities that depend on it: LPM support checks,
Super Speed capability checks, U1/U2 states setup.

Signed-off-by: Ricardo Cañuelo
Cc: stable
Link: https://lore.kernel.org/r/20230830100418.1952143-1-ricardo.canuelo@collabora.com
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: wanxiaoqing
---
 drivers/usb/core/hub.c | 28 ++++++++++++++++++++++++----
 drivers/usb/core/hub.h |  2 +-
 2 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 585a0fea3cd8..78889d8a2df1 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -146,6 +146,10 @@ int usb_device_supports_lpm(struct usb_device *udev)
 	if (udev->quirks & USB_QUIRK_NO_LPM)
 		return 0;
 
+	/* Skip if the device BOS descriptor couldn't be read */
+	if (!udev->bos)
+		return 0;
+
 	/* USB 2.1 (and greater) devices indicate LPM support through
 	 * their USB 2.0 Extended Capabilities BOS descriptor.
 	 */
@@ -322,6 +326,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
 	if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
 		return;
 
+	/* Skip if the device BOS descriptor couldn't be read */
+	if (!udev->bos)
+		return;
+
 	hub = usb_hub_to_struct_hub(udev->parent);
 	/* It doesn't take time to transition the roothub into U0, since it
 	 * doesn't have an upstream link.
@@ -2682,7 +2690,8 @@ int usb_authorize_device(struct usb_device *usb_dev)
 }
 
 /*
- * Return 1 if port speed is SuperSpeedPlus, 0 otherwise
+ * Return 1 if port speed is SuperSpeedPlus, 0 otherwise or if the
+ * capability couldn't be checked.
  * check it from the link protocol field of the current speed ID attribute.
  * current speed ID is got from ext port status request. Sublink speed attribute
 * table is returned with the hub BOS SSP device capability descriptor
@@ -2692,8 +2701,12 @@ static int port_speed_is_ssp(struct usb_device *hdev, int speed_id)
 	int ssa_count;
 	u32 ss_attr;
 	int i;
-	struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
+	struct usb_ssp_cap_descriptor *ssp_cap;
 
+	if (!hdev->bos)
+		return 0;
+
+	ssp_cap = hdev->bos->ssp_cap;
 	if (!ssp_cap)
 		return 0;
 
@@ -4112,8 +4125,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
 		enum usb3_link_state state)
 {
 	int timeout, ret;
-	__u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
-	__le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
+	__u8 u1_mel;
+	__le16 u2_mel;
+
+	/* Skip if the device BOS descriptor couldn't be read */
+	if (!udev->bos)
+		return;
+
+	u1_mel = udev->bos->ss_cap->bU1devExitLat;
+	u2_mel = udev->bos->ss_cap->bU2DevExitLat;
 
 	/* If the device says it doesn't have *any* exit latency to come out of
 	 * U1 or U2, it's probably lying. Assume it doesn't implement that link
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 22ea1f4f2d66..db4c7e2c5960 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -141,7 +141,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev)
 {
 	return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
 		le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
-		hdev->bos->ssp_cap);
+		hdev->bos && hdev->bos->ssp_cap);
 }
 
 static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
-- 
Gitee

From e1fe3bb8e7c405bed5c028cfca2eba8dcb6acad5 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sun, 4 Feb 2024 15:25:12 +0000
Subject: [PATCH 4/7] sched/membarrier: reduce the ability to hammer on sys_membarrier

stable inclusion
from stable-v5.10.210
commit db896bbe4a9c67cee377e5f6a743350d3ae4acf6
category: bugfix
issue: #I982MW
CVE: CVE-2024-26602

Signed-off-by: yaowenrui

---------------------------------------

commit 944d5fe50f3f03daacfea16300e656a1691c4a23 upstream.

On some systems, sys_membarrier can be very expensive, causing overall
slowdowns for everything. So put a lock on the path in order to
serialize the accesses to prevent the ability for this to be called at
too high of a frequency and saturate the machine.

Reviewed-and-tested-by: Mathieu Desnoyers
Acked-by: Borislav Petkov
Fixes: 22e4ebb97582 ("membarrier: Provide expedited private command")
Fixes: c5f58bd58f43 ("membarrier: Provide GLOBAL_EXPEDITED command")
Signed-off-by: Linus Torvalds
[ converted to explicit mutex_*() calls - cleanup.h is not in this stable branch - gregkh ]
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: wanxiaoqing
---
 kernel/sched/membarrier.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index cc7cd512e4e3..1b7c3bdba8f7 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -34,6 +34,8 @@
 	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	\
 	| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
 
+static DEFINE_MUTEX(membarrier_ipi_mutex);
+
 static void ipi_mb(void *info)
 {
 	smp_mb();	/* IPIs should be serializing but paranoid. */
@@ -119,6 +121,7 @@ static int membarrier_global_expedited(void)
 	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
 		return -ENOMEM;
 
+	mutex_lock(&membarrier_ipi_mutex);
 	cpus_read_lock();
 	rcu_read_lock();
 	for_each_online_cpu(cpu) {
@@ -165,6 +168,8 @@ static int membarrier_global_expedited(void)
 	 * rq->curr modification in scheduler.
 	 */
 	smp_mb();	/* exit from system call is not a mb */
+	mutex_unlock(&membarrier_ipi_mutex);
+
 	return 0;
 }
 
@@ -208,6 +213,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
 	if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
 		return -ENOMEM;
 
+	mutex_lock(&membarrier_ipi_mutex);
 	cpus_read_lock();
 
 	if (cpu_id >= 0) {
@@ -280,6 +286,7 @@ static int membarrier_private_expedited(int flags, int cpu_id)
 	 * rq->curr modification in scheduler.
 	 */
 	smp_mb();	/* exit from system call is not a mb */
+	mutex_unlock(&membarrier_ipi_mutex);
 
 	return 0;
 }
@@ -321,6 +328,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 	 * between threads which are users of @mm has its membarrier state
 	 * updated.
 	 */
+	mutex_lock(&membarrier_ipi_mutex);
 	cpus_read_lock();
 	rcu_read_lock();
 	for_each_online_cpu(cpu) {
@@ -337,6 +345,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 
 	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
+	mutex_unlock(&membarrier_ipi_mutex);
 
 	return 0;
 }
-- 
Gitee

From 8327a5e4fd8cfd651d53f468420e3c9bc12629fe Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Wed, 21 Jun 2023 17:56:53 +0200
Subject: [PATCH 5/7] netfilter: conntrack: dccp: copy entire header to stack buffer, not just basic one

stable inclusion
from stable-v5.10.188
commit 9bdcda7abaf22f6453e5b5efb7eb4e524095d5d8
category: bugfix
issue: #I8J3EE
CVE: CVE-2023-39197

Signed-off-by: wanxiaoqing

---------------------------------------

[ Upstream commit ff0a3a7d52ff7282dbd183e7fc29a1fe386b0c30 ]

Eric Dumazet says:
  nf_conntrack_dccp_packet() has an unique:
	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
  And nothing more is 'pulled' from the packet, depending on the content.
  dh->dccph_doff, and/or dh->dccph_x ...)
  So dccp_ack_seq() is happily reading stuff past the _dh buffer.

BUG: KASAN: stack-out-of-bounds in nf_conntrack_dccp_packet+0x1134/0x11c0
Read of size 4 at addr ffff000128f66e0c by task syz-executor.2/29371
[..]

Fix this by increasing the stack buffer to also include room for the
extra sequence numbers and all the known dccp packet type headers, then
pull again after the initial validation of the basic header.

While at it, mark packets invalid that lack 48bit sequence bit but
where RFC says the type MUST use them.

Compile tested only.

v2: first skb_header_pointer() now needs to adjust the size to only
    pull the generic header. (Eric)

Heads-up: I intend to remove dccp conntrack support later this year.
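As a rough stand-alone illustration of the new 48-bit sequence-number
check, a hedged sketch follows; the packet-type values and the helper
name are assumptions made for the example, not taken from
include/linux/dccp.h:

  #include <stdbool.h>

  /* Assumed packet-type ordering, for illustration only. */
  enum {
  	PKT_REQUEST, PKT_RESPONSE, PKT_DATA, PKT_ACK, PKT_DATAACK,
  	PKT_CLOSEREQ, PKT_CLOSE, PKT_RESET, PKT_SYNC, PKT_SYNCACK,
  	PKT_INVALID,
  };

  /* Bitmask of types that must carry extended (48-bit) sequence numbers. */
  static const unsigned long require_seq48 =
  	1UL << PKT_REQUEST  | 1UL << PKT_RESPONSE |
  	1UL << PKT_CLOSEREQ | 1UL << PKT_CLOSE    |
  	1UL << PKT_RESET    | 1UL << PKT_SYNC     | 1UL << PKT_SYNCACK;

  /* Returns true when the packet must be rejected: its type requires
   * 48-bit sequence numbers but the extended-sequence flag is unset. */
  static bool seq48_missing(unsigned int type, bool has_ext_seq)
  {
  	return type < PKT_INVALID &&
  	       (require_seq48 & (1UL << type)) && !has_ext_seq;
  }
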
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Reported-by: Eric Dumazet
Signed-off-by: Florian Westphal
Reviewed-by: Eric Dumazet
Signed-off-by: Pablo Neira Ayuso
Signed-off-by: Sasha Levin
Signed-off-by: wanxiaoqing
---
 net/netfilter/nf_conntrack_proto_dccp.c | 52 +++++++++++++++++++++++--
 1 file changed, 49 insertions(+), 3 deletions(-)

diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 94001eb51ffe..a9ae292e932a 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -431,9 +431,19 @@ static bool dccp_error(const struct dccp_hdr *dh,
 			struct sk_buff *skb, unsigned int dataoff,
 			const struct nf_hook_state *state)
 {
+	static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST |
+						   1 << DCCP_PKT_RESPONSE |
+						   1 << DCCP_PKT_CLOSEREQ |
+						   1 << DCCP_PKT_CLOSE |
+						   1 << DCCP_PKT_RESET |
+						   1 << DCCP_PKT_SYNC |
+						   1 << DCCP_PKT_SYNCACK;
 	unsigned int dccp_len = skb->len - dataoff;
 	unsigned int cscov;
 	const char *msg;
+	u8 type;
+
+	BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG);
 
 	if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) ||
 	    dh->dccph_doff * 4 > dccp_len) {
@@ -458,10 +468,17 @@ static bool dccp_error(const struct dccp_hdr *dh,
 		goto out_invalid;
 	}
 
-	if (dh->dccph_type >= DCCP_PKT_INVALID) {
+	type = dh->dccph_type;
+	if (type >= DCCP_PKT_INVALID) {
 		msg = "nf_ct_dccp: reserved packet type ";
 		goto out_invalid;
 	}
+
+	if (test_bit(type, &require_seq48) && !dh->dccph_x) {
+		msg = "nf_ct_dccp: type lacks 48bit sequence numbers";
+		goto out_invalid;
+	}
+
 	return false;
 out_invalid:
 	nf_l4proto_log_invalid(skb, state->net, state->pf,
@@ -469,24 +486,53 @@ static bool dccp_error(const struct dccp_hdr *dh,
 	return true;
 }
 
+struct nf_conntrack_dccp_buf {
+	struct dccp_hdr dh;	 /* generic header part */
+	struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */
+	union {			 /* depends on header type */
+		struct dccp_hdr_ack_bits ack;
+		struct dccp_hdr_request req;
+		struct dccp_hdr_response response;
+		struct dccp_hdr_reset rst;
+	} u;
+};
+
+static struct dccp_hdr *
+dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh,
+		    struct nf_conntrack_dccp_buf *buf)
+{
+	unsigned int hdrlen = __dccp_hdr_len(dh);
+
+	if (hdrlen > sizeof(*buf))
+		return NULL;
+
+	return skb_header_pointer(skb, offset, hdrlen, buf);
+}
+
 int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
 			     unsigned int dataoff,
 			     enum ip_conntrack_info ctinfo,
 			     const struct nf_hook_state *state)
 {
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-	struct dccp_hdr _dh, *dh;
+	struct nf_conntrack_dccp_buf _dh;
 	u_int8_t type, old_state, new_state;
 	enum ct_dccp_roles role;
 	unsigned int *timeouts;
+	struct dccp_hdr *dh;
 
-	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
+	dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh);
 	if (!dh)
 		return NF_DROP;
 
 	if (dccp_error(dh, skb, dataoff, state))
 		return -NF_ACCEPT;
 
+	/* pull again, including possible 48 bit sequences and subtype header */
+	dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
+	if (!dh)
+		return NF_DROP;
+
 	type = dh->dccph_type;
 	if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh))
 		return -NF_ACCEPT;
-- 
Gitee

From 98bd9e5a8b027ea28934c379147a6835eb7db28e Mon Sep 17 00:00:00 2001
From: Ross Lagerwall
Date: Thu, 3 Aug 2023 08:41:22 +0200
Subject: [PATCH 6/7] xen/netback: Fix buffer overrun triggered by unusual packet

stable inclusion
from stable-v5.10.189
commit f9167a2d6b943f30743de6ff8163d1981c34f9a9
category: bugfix
issue: #I7UAQ6
CVE: CVE-2023-34319

Signed-off-by: yaowenrui

---------------------------------------

commit 534fc31d09b706a16d83533e16b5dc855caf7576 upstream.

It is possible that a guest can send a packet that contains a head + 18
slots and yet has a len <= XEN_NETBACK_TX_COPY_LEN. This causes nr_slots
to underflow in xenvif_get_requests() which then causes the subsequent
loop's termination condition to be wrong, causing a buffer overrun of
queue->tx_map_ops.

Rework the code to account for the extra frag_overflow slots.

This is CVE-2023-34319 / XSA-432.

Fixes: ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area")
Signed-off-by: Ross Lagerwall
Reviewed-by: Paul Durrant
Reviewed-by: Wei Liu
Signed-off-by: Juergen Gross
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: Zhengchao Shao
Signed-off-by: yaowenrui
---
 drivers/net/xen-netback/netback.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2898d74572b1..ec814144f5c9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -391,7 +391,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
 	struct xen_netif_tx_request *txp = first;
 
-	nr_slots = shinfo->nr_frags + 1;
+	nr_slots = shinfo->nr_frags + frag_overflow + 1;
 
 	copy_count(skb) = 0;
 
@@ -446,8 +446,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		}
 	}
 
-	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, gop++) {
+	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
+	     shinfo->nr_frags++, gop++, nr_slots--) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
@@ -460,12 +460,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 			txp++;
 	}
 
-	if (frag_overflow) {
+	if (nr_slots > 0) {
 
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
 		     shinfo->nr_frags++, txp++, gop++) {
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
@@ -476,6 +476,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		}
 
 		skb_shinfo(skb)->frag_list = nskb;
+	} else if (nskb) {
+		/* A frag_list skb was allocated but it is no longer needed
+		 * because enough slots were converted to copy ops above.
+		 */
+		kfree_skb(nskb);
 	}
 
 	(*copy_ops) = cop - queue->tx_copy_ops;
-- 
Gitee

From c04fb93e86681860f4d554e6b10af29b664f0b25 Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Mon, 8 Jan 2024 10:38:12 +0100
Subject: [PATCH 7/7] xen-netback: don't produce zero-size SKB frags

stable inclusion
from stable-v5.10.209
commit cce8ba6fa4ec43ad778d64823a2f8ca120d362c1
category: bugfix
issue: #I917CU
CVE: CVE-2023-46838

Signed-off-by: wanxiaoqing

---------------------------------------

commit c7ec4f2d684e17d69bbdd7c4324db0ef5daac26a upstream.

While frontends may submit zero-size requests (wasting a precious slot),
core networking code as of at least 3ece782693c4b ("sock: skb_copy_ubufs
support for compound pages") can't deal with SKBs when they have all
zero-size fragments.

Respond to empty requests right when populating fragments; all further
processing is fragment based and hence won't encounter these empty
requests anymore.
In a way this should have been that way from the beginning: When no
data is to be transferred for a particular request, there's not even a
point in validating the respective grant ref. That's no different from
e.g. passing NULL into memcpy() when at the same time the size is 0.

This is XSA-448 / CVE-2023-46838.

Cc: stable@vger.kernel.org
Signed-off-by: Jan Beulich
Reviewed-by: Juergen Gross
Reviewed-by: Paul Durrant
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: wanxiaoqing
---
 drivers/net/xen-netback/netback.c | 44 ++++++++++++++++++++++++++-----
 1 file changed, 38 insertions(+), 6 deletions(-)

diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index ec814144f5c9..3dcb2fa9ae3c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -447,12 +447,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	}
 
 	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
-	     shinfo->nr_frags++, gop++, nr_slots--) {
+	     nr_slots--) {
+		if (unlikely(!txp->size)) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&queue->response_lock, flags);
+			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			spin_unlock_irqrestore(&queue->response_lock, flags);
+			++txp;
+			continue;
+		}
+
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
 		xenvif_tx_create_map_op(queue, pending_idx, txp,
 					txp == first ? extra_count : 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+		++shinfo->nr_frags;
+		++gop;
 
 		if (txp == first)
 			txp = txfrags;
@@ -465,20 +478,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		shinfo = skb_shinfo(nskb);
 		frags = shinfo->frags;
 
-		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-		     shinfo->nr_frags++, txp++, gop++) {
+		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+			if (unlikely(!txp->size)) {
+				unsigned long flags;
+
+				spin_lock_irqsave(&queue->response_lock, flags);
+				make_tx_response(queue, txp, 0,
+						 XEN_NETIF_RSP_OKAY);
+				push_tx_responses(queue);
+				spin_unlock_irqrestore(&queue->response_lock,
+						       flags);
+				continue;
+			}
+
 			index = pending_index(queue->pending_cons++);
 			pending_idx = queue->pending_ring[index];
 			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
 						gop);
 			frag_set_pending_idx(&frags[shinfo->nr_frags],
 					     pending_idx);
+			++shinfo->nr_frags;
+			++gop;
 		}
 
-		skb_shinfo(skb)->frag_list = nskb;
-	} else if (nskb) {
+		if (shinfo->nr_frags) {
+			skb_shinfo(skb)->frag_list = nskb;
+			nskb = NULL;
+		}
+	}
+
+	if (nskb) {
 		/* A frag_list skb was allocated but it is no longer needed
-		 * because enough slots were converted to copy ops above.
+		 * because enough slots were converted to copy ops above or some
+		 * were empty.
 		 */
 		kfree_skb(nskb);
 	}
-- 
Gitee
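To close, a hedged, self-contained sketch of the restructuring in the last
patch: the per-iteration increments move out of the for header so that an
empty request can be skipped (and, in the real driver, acknowledged)
without consuming a fragment slot. The types and names below are
simplified stand-ins, not the xen-netback structures:

  #include <stddef.h>

  struct tx_req { unsigned int size; };	/* stand-in for xen_netif_tx_request */

  /* Copy request sizes into frag_sizes, skipping zero-size requests so they
   * never become zero-size fragments; returns the number of slots used. */
  static size_t fill_frags_sketch(unsigned int *frag_sizes, size_t max_frags,
  				const struct tx_req *txp, size_t nr_slots)
  {
  	size_t nr_frags = 0;

  	for (; nr_slots > 0 && nr_frags < max_frags; nr_slots--, txp++) {
  		if (txp->size == 0)
  			continue;	/* the driver also sends a response here */

  		frag_sizes[nr_frags] = txp->size;
  		++nr_frags;		/* a slot is only consumed for non-empty requests */
  	}
  	return nr_frags;
  }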