From b403b3339ea4dc825b887a962c3a5c681464c085 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 6 Dec 2023 15:27:05 -0800 Subject: [PATCH 1/6] net: tls, update curr on splice as well stable inclusion from stable-v5.10.208 commit c6b2a6b827d4b2d0f36b520e54e083df9b330a7b category: bugfix issue: #I91BE9 CVE: CVE-2024-0646 Signed-off-by: wanxiaoqing --------------------------------------- commit c5a595000e2677e865a39f249c056bc05d6e55fd upstream. The curr pointer must also be updated on the splice similar to how we do this for other copy types. Fixes: d829e9c4112b ("tls: convert to generic sk_msg interface") Signed-off-by: John Fastabend Reported-by: Jann Horn Link: https://lore.kernel.org/r/20231206232706.374377-2-john.fastabend@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman Signed-off-by: wanxiaoqing --- net/tls/tls_sw.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 50eae668578a..dd980438f201 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1215,6 +1215,8 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page, } sk_msg_page_add(msg_pl, page, copy, offset); + msg_pl->sg.copybreak = 0; + msg_pl->sg.curr = msg_pl->sg.end; sk_mem_charge(sk, copy); offset += copy; -- Gitee From e606b17209f68203a5dc6fa693e3c897f114ce6b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 20 Jan 2024 22:50:04 +0100 Subject: [PATCH 2/6] netfilter: nf_tables: reject QUEUE/DROP verdict parameters mainline inclusion from mainline-v6.8-rc2 commit f342de4e2f33e0e39165d8639387aa6c19dff660 category: bugfix issue: #I917B9 CVE: CVE-2024-1086 Signed-off-by: wanxiaoqing40281 --------------------------------------- This reverts commit e0abdadcc6e1. core.c:nf_hook_slow assumes that the upper 16 bits of NF_DROP verdicts contain a valid errno, i.e. -EPERM, -EHOSTUNREACH or similar, or 0. Due to the reverted commit, its possible to provide a positive value, e.g. 
NF_ACCEPT (1), which results in use-after-free. Its not clear to me why this commit was made. NF_QUEUE is not used by nftables; "queue" rules in nftables will result in use of "nft_queue" expression. If we later need to allow specifiying errno values from userspace (do not know why), this has to call NF_DROP_GETERR and check that "err <= 0" holds true. Fixes: e0abdadcc6e1 ("netfilter: nf_tables: accept QUEUE/DROP verdict parameters") Cc: stable@vger.kernel.org Reported-by: Notselwyn Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso Signed-off-by: wanxiaoqing --- net/netfilter/nf_tables_api.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 48f340ef492a..50306a9af6f7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -8688,16 +8688,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE])); switch (data->verdict.code) { - default: - switch (data->verdict.code & NF_VERDICT_MASK) { - case NF_ACCEPT: - case NF_DROP: - case NF_QUEUE: - break; - default: - return -EINVAL; - } - fallthrough; + case NF_ACCEPT: + case NF_DROP: + case NF_QUEUE: + break; case NFT_CONTINUE: case NFT_BREAK: case NFT_RETURN: @@ -8731,6 +8725,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, chain->use++; data->verdict.chain = chain; break; + default: + return -EINVAL; } desc->len = sizeof(data->verdict); -- Gitee From 755bfaf85a62f52a7b116141b1b2304ec4791bb2 Mon Sep 17 00:00:00 2001 From: liangbotong Date: Wed, 24 Jan 2024 14:37:07 +0800 Subject: [PATCH 3/6] setsockopt adds the netibooster ultra-reliable connection feature Signed-off-by: liangbotong --- include/linux/tcp.h | 3 +++ include/net/inet_connection_sock.h | 10 ++++++++ include/net/tcp.h | 12 +++++++++ include/uapi/linux/tcp.h | 9 ++++++- net/Kconfig | 7 ++++++ 
net/ipv4/tcp.c | 39 ++++++++++++++++++++++++++++++ net/ipv4/tcp_output.c | 7 ++++-- net/ipv4/tcp_timer.c | 8 ++++++ 8 files changed, 92 insertions(+), 3 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index e62bf28b8ffd..8c7dc2ecbd58 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -365,6 +365,9 @@ struct tcp_sock { #else #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 #endif +#ifdef CONFIG_TCP_NB_URC + u16 tcp_retries2; +#endif /* CONFIG_TCP_NB_URC */ u16 timeout_rehash; /* Timeout-triggered rehash attempts */ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index ff901aade442..751720b5c62d 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -108,6 +108,11 @@ struct inet_connection_sock { __u8 icsk_syn_retries; __u8 icsk_probes_out; __u16 icsk_ext_hdr_len; +#ifdef CONFIG_TCP_NB_URC + __u8 icsk_nb_urc_enabled:1, + icsk_nb_urc_reserved:7; + __u32 icsk_nb_urc_rto; +#endif /* CONFIG_TCP_NB_URC */ struct { __u8 pending; /* ACK is pending */ __u8 quick; /* Scheduled number of quick acks */ @@ -220,6 +225,11 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, { struct inet_connection_sock *icsk = inet_csk(sk); +#ifdef CONFIG_TCP_NB_URC + if (icsk->icsk_nb_urc_enabled) + when = icsk->icsk_nb_urc_rto; +#endif /* CONFIG_TCP_NB_URC */ + if (when > max_when) { pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, (void *)_THIS_IP_); diff --git a/include/net/tcp.h b/include/net/tcp.h index 114ed8a65a88..fe849f814582 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2393,4 +2393,16 @@ static inline u64 tcp_transmit_time(const struct sock *sk) return 0; } +#ifdef CONFIG_TCP_NB_URC +static inline int tcp_get_retries_limit(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + + if (inet_csk(sk)->icsk_nb_urc_enabled) + return tp->tcp_retries2; + + return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); +} +#endif 
/* CONFIG_TCP_NB_URC */
+
 #endif	/* _TCP_H */
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 62db78b9c1a0..27d542fa014b 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -128,12 +128,19 @@ enum {
 #define TCP_CM_INQ		TCP_INQ
 
 #define TCP_TX_DELAY	37	/* delay outgoing packets by XX usec */
-
+#define TCP_NB_URC	101	/* netibooster ultra-reliable connection */
 #define TCP_REPAIR_ON		1
 #define TCP_REPAIR_OFF		0
 #define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */
 
+struct tcp_nb_urc {
+	__u8 nb_urc_enabled;
+	__u8 syn_retries;
+	__u16 tcp_retries2;
+	__u32 nb_urc_rto;
+};
+
 struct tcp_repair_opt {
 	__u32 opt_code;
 	__u32 opt_val;
diff --git a/net/Kconfig b/net/Kconfig
index e64a689c1f27..b07ee3b024ab 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -459,6 +459,13 @@ config ETHTOOL_NETLINK
 
 source "net/newip/Kconfig"
 
+config TCP_NB_URC
+	bool "NetiBooster Ultra-Reliable Connection Feature"
+	default n
+	help
+	  This option enables the NetiBooster Ultra-Reliable Connection feature.
+	  When enabled, the kernel will include support for CONFIG_TCP_NB_URC.
+
 endif   # if NET
 
 # Used by archs to tell that they support BPF JIT compiler plus which flavour.
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index a1de705e2ddc..c7f22c43dd1a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -457,6 +457,11 @@ void tcp_init_sock(struct sock *sk) sk_sockets_allocated_inc(sk); sk->sk_route_forced_caps = NETIF_F_GSO; +#ifdef CONFIG_TCP_NB_URC + icsk->icsk_nb_urc_enabled = 0; + icsk->icsk_nb_urc_rto = TCP_TIMEOUT_INIT; + tp->tcp_retries2 = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); +#endif /* CONFIG_TCP_NB_URC */ } EXPORT_SYMBOL(tcp_init_sock); @@ -2807,6 +2812,11 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_rto = TCP_TIMEOUT_INIT; icsk->icsk_rto_min = TCP_RTO_MIN; icsk->icsk_delack_max = TCP_DELACK_MAX; +#ifdef CONFIG_TCP_NB_URC + icsk->icsk_nb_urc_enabled = 0; + icsk->icsk_nb_urc_rto = TCP_TIMEOUT_INIT; + tp->tcp_retries2 = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); +#endif /* CONFIG_TCP_NB_URC */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tp->snd_cwnd = TCP_INIT_CWND; tp->snd_cwnd_cnt = 0; @@ -3143,6 +3153,30 @@ int tcp_sock_set_keepcnt(struct sock *sk, int val) } EXPORT_SYMBOL(tcp_sock_set_keepcnt); +#ifdef CONFIG_TCP_NB_URC +static int tcp_set_nb_urc(struct sock *sk, sockptr_t optval, int optlen) { + int err = 0; + struct tcp_nb_urc opt = {}; + struct inet_connection_sock *icsk = inet_csk(sk); + + if (optlen != sizeof(struct tcp_nb_urc)) { + err = -EINVAL; + return err; + } + + if (copy_from_sockptr(&opt, optval, sizeof(struct tcp_nb_urc))) { + err = -EINVAL; + return err; + } + + icsk->icsk_syn_retries = opt.syn_retries; + tcp_sk(sk)->tcp_retries2 = opt.tcp_retries2; + icsk->icsk_nb_urc_enabled = opt.nb_urc_enabled; + icsk->icsk_nb_urc_rto = opt.nb_urc_rto; + + return err; +} +#endif /* CONFIG_TCP_NB_URC */ /* * Socket option code for TCP. 
*/ @@ -3449,6 +3483,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, tcp_enable_tx_delay(); tp->tcp_tx_delay = val; break; +#ifdef CONFIG_TCP_NB_URC + case TCP_NB_URC: + err = tcp_set_nb_urc(sk, optval, optlen); + break; +#endif /* CONFIG_TCP_NB_URC */ default: err = -ENOPROTOOPT; break; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e4ad274ec7a3..619d92b3ac2e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -4088,7 +4088,6 @@ void tcp_send_probe0(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - struct net *net = sock_net(sk); unsigned long timeout; int err; @@ -4104,7 +4103,11 @@ void tcp_send_probe0(struct sock *sk) icsk->icsk_probes_out++; if (err <= 0) { - if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) +#ifdef CONFIG_TCP_NB_URC + if (icsk->icsk_backoff < tcp_get_retries_limit(sk)) +#else + if (icsk->icsk_backoff < READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2)) +#endif /* CONFIG_TCP_NB_URC */ icsk->icsk_backoff++; timeout = tcp_probe0_when(sk, TCP_RTO_MAX); } else { diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 888683f2ff3e..58b2555c76f9 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -249,7 +249,11 @@ static int tcp_write_timeout(struct sock *sk) __dst_negative_advice(sk); } +#ifdef CONFIG_TCP_NB_URC + retry_until = tcp_get_retries_limit(sk); +#else retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2); +#endif /* CONFIG_TCP_NB_URC */ if (sock_flag(sk, SOCK_DEAD)) { const bool alive = icsk->icsk_rto < TCP_RTO_MAX; @@ -380,7 +384,11 @@ static void tcp_probe_timer(struct sock *sk) msecs_to_jiffies(icsk->icsk_user_timeout)) goto abort; +#ifdef CONFIG_TCP_NB_URC + max_probes = tcp_get_retries_limit(sk); +#else max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); +#endif /* CONFIG_TCP_NB_URC */ if (sock_flag(sk, SOCK_DEAD)) { const bool alive = inet_csk_rto_backoff(icsk, 
TCP_RTO_MAX) < TCP_RTO_MAX; -- Gitee From c9f7b2125453a9693ab13bd0f0bee9117b1d4319 Mon Sep 17 00:00:00 2001 From: Sharath Srinivasan Date: Fri, 19 Jan 2024 17:48:39 -0800 Subject: [PATCH 4/6] net/rds: Fix UBSAN: array-index-out-of-bounds in rds_cmsg_recv mainline inclusion from mainline-v6.8-rc2 commit 13e788deb7348cc88df34bed736c3b3b9927ea52 category: bugfix issue: #I900BK CVE: CVE-2024-23849 Signed-off-by: wanxiaoqing --------------------------------------- Syzcaller UBSAN crash occurs in rds_cmsg_recv(), which reads inc->i_rx_lat_trace[j + 1] with index 4 (3 + 1), but with array size of 4 (RDS_RX_MAX_TRACES). Here 'j' is assigned from rs->rs_rx_trace[i] and in-turn from trace.rx_trace_pos[i] in rds_recv_track_latency(), with both arrays sized 3 (RDS_MSG_RX_DGRAM_TRACE_MAX). So fix the off-by-one bounds check in rds_recv_track_latency() to prevent a potential crash in rds_cmsg_recv(). Found by syzcaller: ================================================================= UBSAN: array-index-out-of-bounds in net/rds/recv.c:585:39 index 4 is out of range for type 'u64 [4]' CPU: 1 PID: 8058 Comm: syz-executor228 Not tainted 6.6.0-gd2f51b3516da #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x136/0x150 lib/dump_stack.c:106 ubsan_epilogue lib/ubsan.c:217 [inline] __ubsan_handle_out_of_bounds+0xd5/0x130 lib/ubsan.c:348 rds_cmsg_recv+0x60d/0x700 net/rds/recv.c:585 rds_recvmsg+0x3fb/0x1610 net/rds/recv.c:716 sock_recvmsg_nosec net/socket.c:1044 [inline] sock_recvmsg+0xe2/0x160 net/socket.c:1066 __sys_recvfrom+0x1b6/0x2f0 net/socket.c:2246 __do_sys_recvfrom net/socket.c:2264 [inline] __se_sys_recvfrom net/socket.c:2260 [inline] __x64_sys_recvfrom+0xe0/0x1b0 net/socket.c:2260 do_syscall_x64 arch/x86/entry/common.c:51 [inline] do_syscall_64+0x40/0x110 arch/x86/entry/common.c:82 entry_SYSCALL_64_after_hwframe+0x63/0x6b 
================================================================== Fixes: 3289025aedc0 ("RDS: add receive message trace used by application") Reported-by: Chenyuan Yang Closes: https://lore.kernel.org/linux-rdma/CALGdzuoVdq-wtQ4Az9iottBqC5cv9ZhcE5q8N7LfYFvkRsOVcw@mail.gmail.com/ Signed-off-by: Sharath Srinivasan Reviewed-by: Simon Horman Signed-off-by: David S. Miller Signed-off-by: wanxiaoqing --- net/rds/af_rds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index b239120dd9ca..0ec0ae148349 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval, rs->rs_rx_traces = trace.rx_traces; for (i = 0; i < rs->rs_rx_traces; i++) { - if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) { + if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) { rs->rs_rx_traces = 0; return -EFAULT; } -- Gitee From b585bc01659b71ae02ad3e86b8dbe9940083253c Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Sat, 20 Jan 2024 19:41:28 +1030 Subject: [PATCH 5/6] btrfs: do not ASSERT() if the newly created subvolume already got read mainline inclusion from mainline-v6.8-rc4 commit e03ee2fe873eb68c1f9ba5112fee70303ebf9dfb category: bugfix issue: #I900BT CVE: CVE-2024-23850 Signed-off-by: wanxiaoqing --------------------------------------- [BUG] There is a syzbot crash, triggered by the ASSERT() during subvolume creation: assertion failed: !anon_dev, in fs/btrfs/disk-io.c:1319 ------------[ cut here ]------------ kernel BUG at fs/btrfs/disk-io.c:1319! 
invalid opcode: 0000 [#1] PREEMPT SMP KASAN RIP: 0010:btrfs_get_root_ref.part.0+0x9aa/0xa60 btrfs_get_new_fs_root+0xd3/0xf0 create_subvol+0xd02/0x1650 btrfs_mksubvol+0xe95/0x12b0 __btrfs_ioctl_snap_create+0x2f9/0x4f0 btrfs_ioctl_snap_create+0x16b/0x200 btrfs_ioctl+0x35f0/0x5cf0 __x64_sys_ioctl+0x19d/0x210 do_syscall_64+0x3f/0xe0 entry_SYSCALL_64_after_hwframe+0x63/0x6b ---[ end trace 0000000000000000 ]--- [CAUSE] During create_subvol(), after inserting root item for the newly created subvolume, we would trigger btrfs_get_new_fs_root() to get the btrfs_root of that subvolume. The idea here is, we have preallocated an anonymous device number for the subvolume, thus we can assign it to the new subvolume. But there is really nothing preventing things like backref walk to read the new subvolume. If that happens before we call btrfs_get_new_fs_root(), the subvolume would be read out, with a new anonymous device number assigned already. In that case, we would trigger ASSERT(), as we really expect no one to read out that subvolume (which is not yet accessible from the fs). But things like backref walk is still possible to trigger the read on the subvolume. Thus our assumption on the ASSERT() is not correct in the first place. [FIX] Fix it by removing the ASSERT(), and just free the @anon_dev, reset it to 0, and continue. If the subvolume tree is read out by something else, it should have already get a new anon_dev assigned thus we only need to free the preallocated one. 
Reported-by: Chenyuan Yang Fixes: 2dfb1e43f57d ("btrfs: preallocate anon block device at first phase of snapshot creation") CC: stable@vger.kernel.org # 5.15+ Reviewed-by: Filipe Manana Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: wanxiaoqing --- fs/btrfs/disk-io.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5a114cad988a..f2f42ffdbfdd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1553,8 +1553,17 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, again: root = btrfs_lookup_fs_root(fs_info, objectid); if (root) { - /* Shouldn't get preallocated anon_dev for cached roots */ - ASSERT(!anon_dev); + /* + * Some other caller may have read out the newly inserted + * subvolume already (for things like backref walk etc). Not + * that common but still possible. In that case, we just need + * to free the anon_dev. + */ + if (unlikely(anon_dev)) { + free_anon_bdev(anon_dev); + anon_dev = 0; + } + if (check_ref && btrfs_root_refs(&root->root_item) == 0) { btrfs_put_root(root); return ERR_PTR(-ENOENT); -- Gitee From 2ff17562216dc50ddf17bc14aea5d4f3e79c1b00 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 9 Jan 2024 15:57:56 +0100 Subject: [PATCH 6/6] dm: limit the number of targets and parameter size area mainline inclusion from mainline-v6.8-rc3 commit bd504bcfec41a503b32054da5472904b404341a4 category: bugfix issue: #I900CX CVE: CVE-2024-23851 Signed-off-by: wanxiaoqing --------------------------------------- The kvmalloc function fails with a warning if the size is larger than INT_MAX. The warning was triggered by a syscall testing robot. In order to avoid the warning, this commit limits the number of targets to 1048576 and the size of the parameter area to 1073741824. 
Signed-off-by: Mikulas Patocka
Signed-off-by: Mike Snitzer
Signed-off-by: wanxiaoqing
---
 drivers/md/dm-core.h  | 2 ++
 drivers/md/dm-ioctl.c | 3 ++-
 drivers/md/dm-table.c | 9 +++++++--
 3 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 3db92d9a030b..ff73b2c17be5 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -19,6 +19,8 @@
 #include "dm.h"
 
 #define DM_RESERVED_MAX_IOS		1024
+#define DM_MAX_TARGETS			1048576
+#define DM_MAX_TARGET_PARAMS		1024
 
 struct dm_kobject_holder {
 	struct kobject kobj;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 5f9b9178c647..2e3f24ef1d13 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1761,6 +1761,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
 		return -EFAULT;
 
-	if (param_kernel->data_size < minimum_data_size)
+	if (unlikely(param_kernel->data_size < minimum_data_size) ||
+	    unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS))
 		return -EINVAL;
 
 	secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5c590895c14c..31bcdcd93c7a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -144,7 +144,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 int dm_table_create(struct dm_table **result, fmode_t mode,
 		    unsigned num_targets, struct mapped_device *md)
 {
-	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
+	struct dm_table *t;
+
+	if (num_targets > DM_MAX_TARGETS)
+		return -EOVERFLOW;
+
+	t = kzalloc(sizeof(*t), GFP_KERNEL);
 
 	if (!t)
 		return -ENOMEM;
@@ -158,7 +163,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 
 	if (!num_targets) {
 		kfree(t);
-		return -ENOMEM;
+		return -EOVERFLOW;
 	}
 
 	if (alloc_targets(t, num_targets)) {
--
Gitee