From 434b03ac6ea340afe0a83b155a66306a08d807f5 Mon Sep 17 00:00:00 2001 From: Julien Stephan Date: Thu, 30 May 2024 11:22:46 +0200 Subject: [PATCH 1/6] driver: iio: add missing checks on iio_info's callback access mainline inclusion from mainline-v6.11-rc1~100^2~69^2~31 commit c4ec8dedca961db056ec85cb7ca8c9f7e2e92252 category: bugfix issue: #IBAVZS CVE: CVE-2024-46715 Signed-off-by: maxinyi --------------------------------------- Some callbacks from iio_info structure are accessed without any check, so if a driver doesn't implement them trying to access the corresponding sysfs entries produce a kernel oops such as: [ 2203.527791] Unable to handle kernel NULL pointer dereference at virtual address 00000000 when execute [...] [ 2203.783416] Call trace: [ 2203.783429] iio_read_channel_info_avail from dev_attr_show+0x18/0x48 [ 2203.789807] dev_attr_show from sysfs_kf_seq_show+0x90/0x120 [ 2203.794181] sysfs_kf_seq_show from seq_read_iter+0xd0/0x4e4 [ 2203.798555] seq_read_iter from vfs_read+0x238/0x2a0 [ 2203.802236] vfs_read from ksys_read+0xa4/0xd4 [ 2203.805385] ksys_read from ret_fast_syscall+0x0/0x54 [ 2203.809135] Exception stack(0xe0badfa8 to 0xe0badff0) [ 2203.812880] dfa0: 00000003 b6f10f80 00000003 b6eab000 00020000 00000000 [ 2203.819746] dfc0: 00000003 b6f10f80 7ff00000 00000003 00000003 00000000 00020000 00000000 [ 2203.826619] dfe0: b6e1bc88 bed80958 b6e1bc94 b6e1bcb0 [ 2203.830363] Code: bad PC value [ 2203.832695] ---[ end trace 0000000000000000 ]--- Reviewed-by: Nuno Sa Signed-off-by: Julien Stephan Link: https://lore.kernel.org/r/20240530-iio-core-fix-segfault-v3-1-8b7cd2a03773@baylibre.com Signed-off-by: Jonathan Cameron Signed-off-by: mxy --- drivers/iio/industrialio-core.c | 7 ++++++- drivers/iio/industrialio-event.c | 9 ++++++++ drivers/iio/inkern.c | 35 ++++++++++++++++++++++---------- 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index ea98aad9fb81..b57461731718 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -684,9 +684,11 @@ static ssize_t iio_read_channel_info(struct device *dev, INDIO_MAX_RAW_ELEMENTS, vals, &val_len, this_attr->address); - else + else if (indio_dev->info->read_raw) ret = indio_dev->info->read_raw(indio_dev, this_attr->c, &vals[0], &vals[1], this_attr->address); + else + return -EINVAL; if (ret < 0) return ret; @@ -791,6 +793,9 @@ static ssize_t iio_read_channel_info_avail(struct device *dev, int length; int type; + if (!indio_dev->info->read_avail) + return -EINVAL; + ret = indio_dev->info->read_avail(indio_dev, this_attr->c, &vals, &type, &length, this_attr->address); diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c index 99ba657b8568..8720b86d8834 100644 --- a/drivers/iio/industrialio-event.c +++ b/drivers/iio/industrialio-event.c @@ -274,6 +274,9 @@ static ssize_t iio_ev_state_store(struct device *dev, if (ret < 0) return ret; + if (!indio_dev->info->write_event_config) + return -EINVAL; + ret = indio_dev->info->write_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), val); @@ -289,6 +292,9 @@ static ssize_t iio_ev_state_show(struct device *dev, struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); int val; + if (!indio_dev->info->read_event_config) + return -EINVAL; + val = indio_dev->info->read_event_config(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr)); @@ -307,6 +313,9 @@ static ssize_t iio_ev_value_show(struct 
device *dev, int val, val2, val_arr[2]; int ret; + if (!indio_dev->info->read_event_value) + return -EINVAL; + ret = indio_dev->info->read_event_value(indio_dev, this_attr->c, iio_ev_attr_type(this_attr), iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr), diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index c32b2577dd99..746e05dd899d 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -500,6 +500,7 @@ EXPORT_SYMBOL_GPL(devm_iio_channel_get_all); static int iio_channel_read(struct iio_channel *chan, int *val, int *val2, enum iio_chan_info_enum info) { + const struct iio_info *iio_info = chan->indio_dev->info; int unused; int vals[INDIO_MAX_RAW_ELEMENTS]; int ret; @@ -511,15 +512,19 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2, if (!iio_channel_has_info(chan->channel, info)) return -EINVAL; - if (chan->indio_dev->info->read_raw_multi) { - ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev, - chan->channel, INDIO_MAX_RAW_ELEMENTS, - vals, &val_len, info); + if (iio_info->read_raw_multi) { + ret = iio_info->read_raw_multi(chan->indio_dev, + chan->channel, + INDIO_MAX_RAW_ELEMENTS, + vals, &val_len, info); *val = vals[0]; *val2 = vals[1]; - } else - ret = chan->indio_dev->info->read_raw(chan->indio_dev, - chan->channel, val, val2, info); + } else if (iio_info->read_raw) { + ret = iio_info->read_raw(chan->indio_dev, + chan->channel, val, val2, info); + } else { + return -EINVAL; + } return ret; } @@ -720,11 +725,15 @@ static int iio_channel_read_avail(struct iio_channel *chan, const int **vals, int *type, int *length, enum iio_chan_info_enum info) { + const struct iio_info *iio_info = chan->indio_dev->info; + if (!iio_channel_has_available(chan->channel, info)) return -EINVAL; - return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel, - vals, type, length, info); + if (iio_info->read_avail) + return iio_info->read_avail(chan->indio_dev, chan->channel, + vals, type, length, info); + return -EINVAL; } int iio_read_avail_channel_attribute(struct iio_channel *chan, @@ -852,8 +861,12 @@ EXPORT_SYMBOL_GPL(iio_get_channel_type); static int iio_channel_write(struct iio_channel *chan, int val, int val2, enum iio_chan_info_enum info) { - return chan->indio_dev->info->write_raw(chan->indio_dev, - chan->channel, val, val2, info); + const struct iio_info *iio_info = chan->indio_dev->info; + + if (iio_info->write_raw) + return iio_info->write_raw(chan->indio_dev, + chan->channel, val, val2, info); + return -EINVAL; } int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2, -- Gitee From 9b331c1a9fc8402ba3578c61e9253f93a5fe8b7a Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Tue, 15 Oct 2024 15:23:01 +0200 Subject: [PATCH 2/6] media: dvbdev: prevent the risk of out of memory access stable inclusion from stable-v5.10.230~60 commit a4a17210c03ade1c8d9a9f193a105654b7a05c11 category: bugfix issue: #IBAVZS CVE: CVE-2024-53063 Signed-off-by: maxinyi --------------------------------------- [ Upstream commit 972e63e895abbe8aa1ccbdbb4e6362abda7cd457 ] The dvbdev contains a static variable used to store dvb minors. The behavior of it depends if CONFIG_DVB_DYNAMIC_MINORS is set or not. When not set, dvb_register_device() won't check for boundaries, as it will rely that a previous call to dvb_register_adapter() would already be enforcing it. On a similar way, dvb_device_open() uses the assumption that the register functions already did the needed checks. 
This can be fragile if some device ends up using different calls. This also generates warnings on static check analysers like Coverity. So, add explicit guards to prevent potential risk of OOM issues. Fixes: 5dd3f3071070 ("V4L/DVB (9361): Dynamic DVB minor allocation") Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: mxy --- drivers/media/dvb-core/dvbdev.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 467be48b9004..2b9a2082b568 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -96,10 +96,15 @@ static DECLARE_RWSEM(minor_rwsem); static int dvb_device_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev; + unsigned int minor = iminor(inode); + + if (minor >= MAX_DVB_MINORS) + return -ENODEV; mutex_lock(&dvbdev_mutex); down_read(&minor_rwsem); - dvbdev = dvb_minors[iminor(inode)]; + + dvbdev = dvb_minors[minor]; if (dvbdev && dvbdev->fops) { int err = 0; @@ -537,7 +542,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (dvb_minors[minor] == NULL) break; - if (minor == MAX_DVB_MINORS) { + if (minor >= MAX_DVB_MINORS) { if (new_node) { list_del (&new_node->list_head); kfree(dvbdevfops); @@ -552,6 +557,14 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, } #else minor = nums2minor(adap->num, type, id); + if (minor >= MAX_DVB_MINORS) { + dvb_media_device_free(dvbdev); + list_del(&dvbdev->list_head); + kfree(dvbdev); + *pdvbdev = NULL; + mutex_unlock(&dvbdev_register_lock); + return ret; + } #endif dvbdev->minor = minor; dvb_minors[minor] = dvb_device_get(dvbdev); -- Gitee From 0e985cf83942328249fd79601f1eb0c4bc3bb9bb Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 6 Nov 2024 21:50:55 +0100 Subject: [PATCH 3/6] media: dvbdev: fix the logic when DVB_DYNAMIC_MINORS is not set mainline inclusion from mainline-v6.12-rc7~18^2~1 commit a4aebaf6e6efff548b01a3dc49b4b9074751c15b category: bugfix issue: #IBAVZS CVE: CVE-2024-53063 Signed-off-by: maxinyi --------------------------------------- When CONFIG_DVB_DYNAMIC_MINORS is not set, ret is not initialized, and a semaphore is left in the wrong state in case of errors. Make the code simpler and avoid mistakes by having just one error check logic, used whether DVB_DYNAMIC_MINORS is used or not.
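For illustration only (not part of the upstream change): a minimal user-space sketch of why the single shared bound check also has to cover the minor computed by nums2minor() on the non-dynamic path. The nums2minor() packing and the constants below are assumptions for demonstration; the real definitions in dvbdev.c differ across kernel versions.

#include <stdio.h>

/* Hypothetical values, for demonstration only */
#define DVB_MAX_ADAPTERS	16
#define MAX_DVB_MINORS		(DVB_MAX_ADAPTERS * 64)

/* Assumed packing: adapter number in the high bits, then id, then type */
static int nums2minor(int num, int type, int id)
{
	return (num << 6) | (id << 4) | type;
}

int main(void)
{
	/* An adapter number at or above the assumed DVB_MAX_ADAPTERS packs
	 * into a minor outside dvb_minors[], which is exactly what the
	 * shared "minor >= MAX_DVB_MINORS" check has to reject. */
	int minor = nums2minor(DVB_MAX_ADAPTERS, 0, 0);

	printf("minor=%d max=%d -> %s\n", minor, MAX_DVB_MINORS,
	       minor >= MAX_DVB_MINORS ? "out of range" : "in range");
	return 0;
}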
Reported-by: kernel test robot Reported-by: Dan Carpenter Closes: https://lore.kernel.org/r/202410201717.ULWWdJv8-lkp@intel.com/ Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/9e067488d8935b8cf00959764a1fa5de85d65725.1730926254.git.mchehab+huawei@kernel.org Signed-off-by: mxy --- drivers/media/dvb-core/dvbdev.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 2b9a2082b568..06b6477b83b0 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -542,6 +542,9 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (dvb_minors[minor] == NULL) break; +#else + minor = nums2minor(adap->num, type, id); +#endif if (minor >= MAX_DVB_MINORS) { if (new_node) { list_del (&new_node->list_head); @@ -555,17 +558,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, mutex_unlock(&dvbdev_register_lock); return -EINVAL; } -#else - minor = nums2minor(adap->num, type, id); - if (minor >= MAX_DVB_MINORS) { - dvb_media_device_free(dvbdev); - list_del(&dvbdev->list_head); - kfree(dvbdev); - *pdvbdev = NULL; - mutex_unlock(&dvbdev_register_lock); - return ret; - } -#endif + dvbdev->minor = minor; dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); -- Gitee From 43c3d8f3438e3eddffa79f453b563a5c0f87f12f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 4 Nov 2024 20:16:42 +0300 Subject: [PATCH 4/6] usb: typec: fix potential out of bounds in ucsi_ccg_update_set_new_cam_cmd() mainline inclusion from mainline-v6.12-rc7~6^2~2 commit 7dd08a0b4193087976db6b3ee7807de7e8316f96 category: bugfix issue: #IBAVZS CVE: CVE-2024-50268 Signed-off-by: maxinyi --------------------------------------- The "*cmd" variable can be controlled by the user via debugfs. That means "new_cam" can be as high as 255 while the size of the uc->updated[] array is UCSI_MAX_ALTMODES (30). 
The call tree is: ucsi_cmd() // val comes from simple_attr_write_xsigned() -> ucsi_send_command() -> ucsi_send_command_common() -> ucsi_run_command() // calls ucsi->ops->sync_control() -> ucsi_ccg_sync_control() Fixes: 170a6726d0e2 ("usb: typec: ucsi: add support for separate DP altmode devices") Cc: stable Signed-off-by: Dan Carpenter Reviewed-by: Heikki Krogerus Link: https://lore.kernel.org/r/325102b3-eaa8-4918-a947-22aca1146586@stanley.mountain Signed-off-by: Greg Kroah-Hartman Signed-off-by: mxy --- drivers/usb/typec/ucsi/ucsi_ccg.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index 6db7c8ddd51c..fb6211efb5d8 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -436,6 +436,8 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc, port = uc->orig; new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd); + if (new_cam >= ARRAY_SIZE(uc->updated)) + return; new_port = &uc->updated[new_cam]; cam = new_port->linked_idx; enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd); -- Gitee From 7755011563a37b1b3fae59ce5770a78ad85a99f8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 23 Oct 2024 15:30:09 +0300 Subject: [PATCH 5/6] ipv4: ip_tunnel: Fix suspicious RCU usage warning in ip_tunnel_find() mainline inclusion from mainline-v6.12-rc6~33^2~15 commit 90e0569dd3d32f4f4d2ca691d3fa5a8a14a13c12 category: bugfix issue: #IBAVZS CVE: CVE-2024-50304 Signed-off-by: maxinyi --------------------------------------- The per-netns IP tunnel hash table is protected by the RTNL mutex and ip_tunnel_find() is only called from the control path where the mutex is taken. Add a lockdep expression to hlist_for_each_entry_rcu() in ip_tunnel_find() in order to validate that the mutex is held and to silence the suspicious RCU usage warning [1]. [1] WARNING: suspicious RCU usage 6.12.0-rc3-custom-gd95d9a31aceb #139 Not tainted ----------------------------- net/ipv4/ip_tunnel.c:221 RCU-list traversed in non-reader section!! 
other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by ip/362: #0: ffffffff86fc7cb0 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x377/0xf60 stack backtrace: CPU: 12 UID: 0 PID: 362 Comm: ip Not tainted 6.12.0-rc3-custom-gd95d9a31aceb #139 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 Call Trace: dump_stack_lvl+0xba/0x110 lockdep_rcu_suspicious.cold+0x4f/0xd6 ip_tunnel_find+0x435/0x4d0 ip_tunnel_newlink+0x517/0x7a0 ipgre_newlink+0x14c/0x170 __rtnl_newlink+0x1173/0x19c0 rtnl_newlink+0x6c/0xa0 rtnetlink_rcv_msg+0x3cc/0xf60 netlink_rcv_skb+0x171/0x450 netlink_unicast+0x539/0x7f0 netlink_sendmsg+0x8c1/0xd80 ____sys_sendmsg+0x8f9/0xc20 ___sys_sendmsg+0x197/0x1e0 __sys_sendmsg+0x122/0x1f0 do_syscall_64+0xbb/0x1d0 entry_SYSCALL_64_after_hwframe+0x77/0x7f Fixes: c54419321455 ("GRE: Refactor GRE tunneling code.") Suggested-by: Eric Dumazet Signed-off-by: Ido Schimmel Reviewed-by: Eric Dumazet Link: https://patch.msgid.link/20241023123009.749764-1-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: mxy --- net/ipv4/ip_tunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 99f70b990eb1..06aafa2687b8 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -218,7 +218,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn, struct ip_tunnel *t = NULL; struct hlist_head *head = ip_bucket(itn, parms); - hlist_for_each_entry_rcu(t, head, hash_node) { + hlist_for_each_entry_rcu(t, head, hash_node, lockdep_rtnl_is_held()) { if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && link == t->parms.link && -- Gitee From b9138a36e6e80f731e20b393a1a3587664d50358 Mon Sep 17 00:00:00 2001 From: Furong Xu <0x1207@gmail.com> Date: Mon, 21 Oct 2024 14:10:23 +0800 Subject: [PATCH 6/6] net: stmmac: TSO: Fix unbalanced DMA map/unmap for non-paged SKB data mainline inclusion from mainline-v6.12-rc6~33^2~18 commit 66600fac7a984dea4ae095411f644770b2561ede category: bugfix issue: #IBAVZS CVE: CVE-2024-53058 Signed-off-by: maxinyi --------------------------------------- In case the non-paged data of a SKB carries protocol header and protocol payload to be transmitted on a certain platform that the DMA AXI address width is configured to 40-bit/48-bit, or the size of the non-paged data is bigger than TSO_MAX_BUFF_SIZE on a certain platform that the DMA AXI address width is configured to 32-bit, then this SKB requires at least two DMA transmit descriptors to serve it. For example, three descriptors are allocated to split one DMA buffer mapped from one piece of non-paged data: dma_desc[N + 0], dma_desc[N + 1], dma_desc[N + 2]. Then three elements of tx_q->tx_skbuff_dma[] will be allocated to hold extra information to be reused in stmmac_tx_clean(): tx_q->tx_skbuff_dma[N + 0], tx_q->tx_skbuff_dma[N + 1], tx_q->tx_skbuff_dma[N + 2]. Now we focus on tx_q->tx_skbuff_dma[entry].buf, which is the DMA buffer address returned by DMA mapping call. stmmac_tx_clean() will try to unmap the DMA buffer _ONLY_IF_ tx_q->tx_skbuff_dma[entry].buf is a valid buffer address. 
The expected behavior that saves the DMA buffer address of this non-paged data to tx_q->tx_skbuff_dma[entry].buf is: tx_q->tx_skbuff_dma[N + 0].buf = NULL; tx_q->tx_skbuff_dma[N + 1].buf = NULL; tx_q->tx_skbuff_dma[N + 2].buf = dma_map_single(); Unfortunately, the current code misbehaves like this: tx_q->tx_skbuff_dma[N + 0].buf = dma_map_single(); tx_q->tx_skbuff_dma[N + 1].buf = NULL; tx_q->tx_skbuff_dma[N + 2].buf = NULL; On the stmmac_tx_clean() side, when dma_desc[N + 0] is closed by the DMA engine, tx_q->tx_skbuff_dma[N + 0].buf is a valid buffer address obviously, then the DMA buffer will be unmapped immediately. There may be a rare case where the DMA engine has not finished the pending dma_desc[N + 1], dma_desc[N + 2] yet. Now things will go horribly wrong: DMA is going to access an unmapped/unreferenced memory region, corrupted data will be transmitted or an iommu fault will be triggered :( In contrast, the for-loop that maps SKB fragments behaves perfectly as expected, and that is how the driver should behave for both non-paged data and paged frags. This patch corrects the DMA map/unmap sequences by fixing the array index for tx_q->tx_skbuff_dma[entry].buf when assigning the DMA buffer address. Tested and verified on DWXGMAC CORE 3.20a Reported-by: Suraj Jaiswal Fixes: f748be531d70 ("stmmac: support new GMAC4") Signed-off-by: Furong Xu <0x1207@gmail.com> Reviewed-by: Hariprasad Kelam Reviewed-by: Simon Horman Link: https://patch.msgid.link/20241021061023.2162701-1-0x1207@gmail.com Signed-off-by: Paolo Abeni Signed-off-by: mxy --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e53497916535..b7ff6cab517b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3257,9 +3257,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; - tx_q->tx_skbuff_dma[first_entry].buf = des; - tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); - if (priv->dma_cap.addr64 <= 32) { first->des0 = cpu_to_le32(des); @@ -3278,6 +3275,21 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); + /* In case two or more DMA transmit descriptors are allocated for this + * non-paged SKB data, the DMA buffer address should be saved to + * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, + * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee + * that stmmac_tx_clean() does not unmap the entire DMA buffer too early + * since the tail areas of the DMA buffer can be accessed by DMA engine + * sooner or later. + * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf + * corresponding to the last descriptor, stmmac_tx_clean() will unmap + * this DMA buffer right after the DMA engine completely finishes the + * full buffer transmission. + */ + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); + /* Prepare fragments */ for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -- Gitee
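As a closing reference for the last patch: a minimal stand-alone sketch of the bookkeeping rule it enforces. The structures and names below are simplified assumptions, not driver code; the point is only that when one mapped buffer spans several TX descriptors, the unmap address is recorded on the slot of the last descriptor, so cleanup unmaps the buffer only after the final descriptor completes.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-ins for the driver's per-descriptor bookkeeping. */
struct tx_slot {
	unsigned long buf;	/* 0 means "nothing to unmap here" */
	bool last_segment;
};

#define NDESC 3

/* Record one buffer split across 'ndesc' descriptors, following the
 * corrected rule: only the last slot owns the unmap address. */
static void record_mapping(struct tx_slot *slots, int first, int ndesc,
			   unsigned long dma_addr)
{
	for (int i = 0; i < ndesc; i++)
		slots[first + i].buf = 0;
	slots[first + ndesc - 1].buf = dma_addr;
	slots[first + ndesc - 1].last_segment = true;
}

/* Cleanup walks completed descriptors and unmaps only when it reaches a
 * slot that actually holds the buffer address, i.e. after the whole
 * buffer has been consumed by the DMA engine. */
static void tx_clean(struct tx_slot *slots, int completed)
{
	for (int i = 0; i < completed; i++) {
		if (slots[i].buf) {
			printf("unmap buffer 0x%lx at descriptor %d\n",
			       slots[i].buf, i);
			slots[i].buf = 0;
		}
	}
}

int main(void)
{
	struct tx_slot slots[NDESC] = { 0 };

	record_mapping(slots, 0, NDESC, 0xdead0000UL);

	/* Only the first descriptor has completed: nothing is unmapped. */
	tx_clean(slots, 1);

	/* All three descriptors have completed: the buffer is unmapped once. */
	tx_clean(slots, NDESC);
	return 0;
}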