From 2868a7f0ae4c7c40788f468c4482542153829c23 Mon Sep 17 00:00:00 2001 From: Dong Yibo Date: Thu, 27 Nov 2025 15:51:27 +0800 Subject: [PATCH] RNP: NET: Update driver to 1.0.0 driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ID93BP CVE: NA -------------------------------- Update rnp driver to 1.0.0, main changes bellow: 1. Fix build warnings with W=1 C=1. 2. Fix xmastree error in codes. 3. Remove pcie err_handler(not support). 4. Support 10G-TP card. 5. Support force_on_close priv-flags. 6. Add csl console function support. 7. Add vf queue setup support in sys. 8. Get lldp status from fw now. 9. Remove count stats in sys. Fixes: 455d45ae0808 ("drivers: initial support for rnp drivers from Mucse Technology") Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnp/rnp.h | 16 +- drivers/net/ethernet/mucse/rnp/rnp_common.h | 18 +- drivers/net/ethernet/mucse/rnp/rnp_dcb.c | 16 +- drivers/net/ethernet/mucse/rnp/rnp_debugfs.c | 237 ++++++- drivers/net/ethernet/mucse/rnp/rnp_ethtool.c | 74 +- drivers/net/ethernet/mucse/rnp/rnp_lib.c | 79 +-- drivers/net/ethernet/mucse/rnp/rnp_main.c | 686 ++++++++++--------- drivers/net/ethernet/mucse/rnp/rnp_mbx.c | 24 +- drivers/net/ethernet/mucse/rnp/rnp_mbx.h | 3 +- drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c | 251 ++++--- drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h | 93 ++- drivers/net/ethernet/mucse/rnp/rnp_mpe.c | 7 +- drivers/net/ethernet/mucse/rnp/rnp_n10.c | 413 ++++++++--- drivers/net/ethernet/mucse/rnp/rnp_param.c | 4 +- drivers/net/ethernet/mucse/rnp/rnp_ptp.c | 23 +- drivers/net/ethernet/mucse/rnp/rnp_regs.h | 1 + drivers/net/ethernet/mucse/rnp/rnp_sriov.c | 107 ++- drivers/net/ethernet/mucse/rnp/rnp_sysfs.c | 512 ++++---------- drivers/net/ethernet/mucse/rnp/rnp_type.h | 8 +- drivers/net/ethernet/mucse/rnp/version.h | 2 +- 20 files changed, 1489 insertions(+), 1085 deletions(-) diff --git a/drivers/net/ethernet/mucse/rnp/rnp.h b/drivers/net/ethernet/mucse/rnp/rnp.h index fbb3271942a7..97fb0e519d55 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp.h +++ b/drivers/net/ethernet/mucse/rnp/rnp.h @@ -217,13 +217,13 @@ struct vf_macvlans { struct rnp_tx_buffer { struct rnp_tx_desc *next_to_watch; unsigned long time_stamp; - struct sk_buff *skb; - unsigned int bytecount; unsigned short gso_segs; + unsigned int bytecount; bool gso_need_padding; + struct sk_buff *skb; __be16 protocol; - __be16 priv_tags; + u16 priv_tags; DEFINE_DMA_UNMAP_ADDR(dma); DEFINE_DMA_UNMAP_LEN(len); union { @@ -354,6 +354,7 @@ struct rnp_ring { #define RNP_RING_IRQ_MISS_FIX ((u32)(1 << 10)) #define RNP_RING_OUTER_VLAN_FIX ((u32)(1 << 11)) #define RNP_RING_CHKSM_FIX ((u32)(1 << 12)) +#define RNP_RING_LOWER_ITR ((u32)(1 << 13)) u8 pfvfnum; u16 count; /* amount of descriptors */ @@ -664,10 +665,12 @@ struct rnp_adapter { /* only rx itr is Supported */ int usecendcount; u16 rx_usecs; + u16 rx_usecs_usr_set; u16 rx_frames; u16 usecstocount; u16 tx_frames; u16 tx_usecs; + u16 tx_usecs_usr_set; u32 pkt_rate_low; u16 rx_usecs_low; u32 pkt_rate_high; @@ -787,6 +790,7 @@ struct rnp_adapter { #define RNP_PRIV_FLAG_SRIOV_VLAN_MODE BIT(23) #define RNP_PRIV_FLAG_REMAP_MODE BIT(24) #define RNP_PRIV_FLAG_LLDP_EN_STAT BIT(25) +#define RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE BIT(26) #define PRIV_DATA_EN BIT(7) int rss_func_mode; @@ -984,8 +988,10 @@ struct rnp_adapter { // struct rnp_info* info; bool dma2_in_1pf; - char name[60]; + void *csl_dma_buf; + dma_addr_t csl_dma_phy; + int csl_dma_size; }; struct rnp_fdir_filter { @@ -1141,7 +1147,7 @@ static inline int 
ignore_veb_vlan(struct rnp_adapter *adapter, union rnp_rx_desc *rx_desc) { if (unlikely((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && - (cpu_to_le16(rx_desc->wb.rev1) & + (le16_to_cpu(rx_desc->wb.rev1) & VEB_VF_IGNORE_VLAN))) { return 1; } diff --git a/drivers/net/ethernet/mucse/rnp/rnp_common.h b/drivers/net/ethernet/mucse/rnp/rnp_common.h index 1ccbfa7c185f..c048cca22287 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_common.h +++ b/drivers/net/ethernet/mucse/rnp/rnp_common.h @@ -79,11 +79,11 @@ static inline unsigned int rnp_rd_reg(void *reg) do { \ dbg(" wr-reg: %p <== 0x%08x \t#%-4d %s\n", (reg), (val), \ __LINE__, __FILE__); \ - iowrite32((val), (void *)(reg)); \ + iowrite32((val), (reg)); \ } while (0) #else -#define rnp_rd_reg(reg) readl((void *)(reg)) -#define rnp_wr_reg(reg, val) writel((val), (void *)(reg)) +#define rnp_rd_reg(reg) readl(reg) +#define rnp_wr_reg(reg, val) writel(val, reg) #endif #define rd32(hw, off) rnp_rd_reg((hw)->hw_addr + (off)) @@ -173,9 +173,9 @@ static inline void hw_queue_strip_rx_vlan(struct rnp_hw *hw, u8 ring_num, #define DPRINTK(nlevel, klevel, fmt, args...) \ ((NETIF_MSG_##nlevel & adapter->msg_enable) ? \ - (void)(netdev_printk(KERN_##klevel, adapter->netdev, \ + ((void)netdev_printk(KERN_##klevel, adapter->netdev, \ fmt, ##args)) : \ - NULL) + (void)0) //==== log helper === #ifdef HW_DEBUG @@ -217,10 +217,10 @@ static inline void hw_queue_strip_rx_vlan(struct rnp_hw *hw, u8 ring_num, static inline void buf_dump_line(const char *msg, int line, void *buf, int len) { - int i, offset = 0; + u8 *ptr = (u8 *)buf; int msg_len = 1024; + int i, offset = 0; u8 msg_buf[1024]; - u8 *ptr = (u8 *)buf; offset += snprintf(msg_buf + offset, msg_len, "=== %s #%d line:%d buf:%p==\n000: ", msg, len, @@ -256,10 +256,10 @@ static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size) static inline void buf_dump(const char *msg, void *buf, int len) { - int i, offset = 0; + u8 *ptr = (u8 *)buf; int msg_len = 1024; + int i, offset = 0; u8 msg_buf[1024]; - u8 *ptr = (u8 *)buf; offset += snprintf(msg_buf + offset, msg_len, "=== %s #%d ==\n000: ", msg, len); diff --git a/drivers/net/ethernet/mucse/rnp/rnp_dcb.c b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c index 1438e6a5806e..b5d7b783ac9d 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_dcb.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_dcb.c @@ -11,12 +11,12 @@ static void rnp_config_prio_map(struct rnp_adapter *adapter, u8 pfc_map) { - int i, j; - u32 prio_map = 0; - u8 port = adapter->port; - u8 *prio_tc = adapter->prio_tc_map; void __iomem *ioaddr = adapter->hw.hw_addr; + u8 *prio_tc = adapter->prio_tc_map; u8 num_tc = adapter->num_tc; + u8 port = adapter->port; + u32 prio_map = 0; + int i, j; for (i = 0; i < num_tc; i++) { if (i > RNP_MAX_TCS_NUM) @@ -50,9 +50,9 @@ static int rnp_dcb_hw_pfc_config(struct rnp_adapter *adapter, u8 pfc_map) { struct rnp_dcb_cfg *dcb = &adapter->dcb_cfg; void __iomem *ioaddr = adapter->hw.hw_addr; + u8 num_tc = adapter->num_tc; u8 i = 0, j = 0; u32 reg = 0; - u8 num_tc = adapter->num_tc; if (!(adapter->flags & RNP_FLAG_DCB_ENABLED) || adapter->num_rx_queues <= 1) { @@ -282,7 +282,9 @@ static int rnp_dcbnl_setpfc(struct net_device *dev, struct ieee_pfc *pfc) static u8 rnp_dcbnl_getpfcstate(struct net_device *netdev) { struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_pfc_cfg *pfc_cfg = &adapter->dcb_cfg.pfc_cfg; + struct rnp_pfc_cfg *pfc_cfg; + + pfc_cfg = &adapter->dcb_cfg.pfc_cfg; return pfc_cfg->pfc_en; } @@ -294,7 +296,7 @@ static void rnp_dcbnl_setpfcstate(struct 
net_device *netdev, u8 state) adapter->dcb_cfg.pfc_cfg.pfc_en = state; } -const struct dcbnl_rtnl_ops rnp_dcbnl_ops = { +static const struct dcbnl_rtnl_ops rnp_dcbnl_ops = { /*IEEE*/ .ieee_getpfc = rnp_dcbnl_getpfc, .ieee_setpfc = rnp_dcbnl_setpfc, diff --git a/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c index 20bf4d9bf6bb..d2b7cf871ab3 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_debugfs.c @@ -9,19 +9,205 @@ static struct dentry *rnp_dbg_root; +#define bus_to_virt phys_to_virt + static char rnp_dbg_reg_ops_buf[256] = ""; +static int rnp_dbg_csl_open(struct inode *inode, struct file *file) +{ + struct rnp_adapter *adapter; + int err, bytes = 4096; + void *dma_buf = NULL; + dma_addr_t dma_phy; + struct rnp_hw *hw; + + if (inode->i_private) + file->private_data = inode->i_private; + else + return -EIO; + + adapter = file->private_data; + + if (!adapter) + return -EIO; + + if (adapter->csl_dma_buf) + return 0; + + hw = &adapter->hw; + + dma_buf = dma_alloc_coherent(&hw->pdev->dev, + bytes, &dma_phy, GFP_ATOMIC); + if (!dma_buf) + return -ENOMEM; + + memset(dma_buf, 0, bytes); + + adapter->csl_dma_buf = dma_buf; + adapter->csl_dma_phy = dma_phy; + adapter->csl_dma_size = bytes; + + err = rnp_mbx_ddr_csl_enable(hw, 1, dma_phy, bytes); + if (err) { + dma_free_coherent(&hw->pdev->dev, bytes, dma_buf, dma_phy); + adapter->csl_dma_buf = NULL; + return -EIO; + } + + return 0; +} + +static int rnp_dbg_csl_release(struct inode *inode, struct file *file) +{ + struct rnp_adapter *adapter = file->private_data; + struct rnp_hw *hw = &adapter->hw; + + if (adapter->csl_dma_buf) { + rnp_mbx_ddr_csl_enable(hw, 0, 0, 0); + dma_free_coherent(&hw->pdev->dev, adapter->csl_dma_size, + adapter->csl_dma_buf, adapter->csl_dma_phy); + adapter->csl_dma_buf = NULL; + } + + return 0; +} + +static int rnp_dbg_csl_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct rnp_adapter *adapter = file->private_data; + dma_addr_t dma_phy = adapter->csl_dma_phy; + int dma_bytes = adapter->csl_dma_size; + void *dma_buf = adapter->csl_dma_buf; + unsigned long length; + int ret = 0; + + length = (unsigned long)(vma->vm_end - vma->vm_start); + + if (length > dma_bytes) + return -EIO; + if (vma->vm_pgoff == 0) { + ret = dma_mmap_coherent(&adapter->pdev->dev, + vma, dma_buf, + dma_phy, length); + } else { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, + PFN_DOWN(virt_to_phys(bus_to_virt(dma_phy))) + + vma->vm_pgoff, + length, vma->vm_page_prot); + } + + if (ret < 0) + return ret; + + return 0; +} + +static const struct file_operations rnp_dbg_csl_fops = { + .owner = THIS_MODULE, + .open = rnp_dbg_csl_open, + .release = rnp_dbg_csl_release, + .mmap = rnp_dbg_csl_mmap, +}; + +static ssize_t rnp_dbg_eth_info_read(struct file *file, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = file->private_data; + char *buf = NULL; + int len; + + if (!adapter) + return -EIO; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "bd:%d port%d %s %s\n", adapter->bd_number, + 0, adapter->netdev->name, pci_name(adapter->pdev)); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static const struct file_operations rnp_dbg_eth_info_fops = { + .owner = 
THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_eth_info_read, +}; + +static ssize_t rnp_dbg_mbx_cookies_info_read(struct file *file, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct rnp_adapter *adapter = file->private_data; + struct mbx_req_cookie_pool *cookie_pool = &adapter->hw.mbx.cookie_pool; + int free_cnt = 0, wait_timout_cnt = 0; + struct mbx_req_cookie *cookie; + int alloced_cnt = 0; + char *buf = NULL; + int len, i; + + if (!adapter) + return -EIO; + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + for (i = 0; i < MAX_COOKIES_ITEMS; i++) { + cookie = &cookie_pool->cookies[i]; + if (cookie->stat == COOKIE_FREE) + free_cnt++; + else if (cookie->stat == COOKIE_FREE_WAIT_TIMEOUT) + wait_timout_cnt++; + else if (cookie->stat == COOKIE_ALLOCED) + alloced_cnt++; + } + + buf = kasprintf(GFP_KERNEL, "pool: cur:%d total: %d free:%d wait_free:%d alloced:%d\n", + cookie_pool->next_idx, + MAX_COOKIES_ITEMS, + free_cnt, wait_timout_cnt, alloced_cnt); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +static const struct file_operations rnp_dbg_mbx_cookies_info_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = rnp_dbg_mbx_cookies_info_read, +}; + /** * rnp_dbg_reg_ops_read - read for reg_ops datum - * @filp: the opened file + * @file: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ -static ssize_t rnp_dbg_reg_ops_read(struct file *filp, char __user *buffer, +static ssize_t rnp_dbg_reg_ops_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { - struct rnp_adapter *adapter = filp->private_data; + struct rnp_adapter *adapter = file->private_data; char *buf; int len; @@ -48,16 +234,16 @@ static ssize_t rnp_dbg_reg_ops_read(struct file *filp, char __user *buffer, /** * rnp_dbg_reg_ops_write - write into reg_ops datum - * @filp: the opened file + * @file: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ -static ssize_t rnp_dbg_reg_ops_write(struct file *filp, +static ssize_t rnp_dbg_reg_ops_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - struct rnp_adapter *adapter = filp->private_data; + struct rnp_adapter *adapter = file->private_data; struct rnp_hw *hw = &adapter->hw; int len; @@ -132,16 +318,16 @@ static char rnp_dbg_netdev_ops_buf[256] = ""; /** * rnp_dbg_netdev_ops_read - read for netdev_ops datum - * @filp: the opened file + * @file: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ -static ssize_t rnp_dbg_netdev_ops_read(struct file *filp, +static ssize_t rnp_dbg_netdev_ops_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { - struct rnp_adapter *adapter = filp->private_data; + struct rnp_adapter *adapter = file->private_data; char *buf; int len; @@ -168,16 +354,16 @@ static ssize_t rnp_dbg_netdev_ops_read(struct file *filp, /** * rnp_dbg_netdev_ops_write - write into netdev_ops datum - * @filp: the opened file + * @file: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ -static ssize_t rnp_dbg_netdev_ops_write(struct file *filp, +static ssize_t 
rnp_dbg_netdev_ops_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { - struct rnp_adapter *adapter = filp->private_data; + struct rnp_adapter *adapter = file->private_data; int len; /* don't allow partial writes */ @@ -219,15 +405,15 @@ static const struct file_operations rnp_dbg_netdev_ops_fops = { .write = rnp_dbg_netdev_ops_write, }; -static ssize_t rnp_dbg_netdev_temp_read(struct file *filp, +static ssize_t rnp_dbg_netdev_temp_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { - struct rnp_adapter *adapter = filp->private_data; + struct rnp_adapter *adapter = file->private_data; struct rnp_hw *hw = &adapter->hw; + int temp = 0, voltage = 0; char *buf; int len; - int temp = 0, voltage = 0; /* don't allow partial reads */ if (*ppos != 0) @@ -289,6 +475,27 @@ void rnp_dbg_adapter_init(struct rnp_adapter *adapter) adapter, &rnp_dbg_netdev_temp); if (!pfile) e_dev_err("debugfs temp for %s failed\n", name); + + if (rnp_is_pf1(&adapter->hw) == 0) { + pfile = debugfs_create_file_unsafe("csl", 0755, + adapter->rnp_dbg_adapter, + adapter, &rnp_dbg_csl_fops); + if (!pfile) + e_dev_err("debugfs csl failed\n"); + } + pfile = debugfs_create_file("info", 0600, + adapter->rnp_dbg_adapter, + adapter, + &rnp_dbg_eth_info_fops); + if (!pfile) + e_dev_err("debugfs info failed\n"); + pfile = debugfs_create_file("mbx_cookies_info", 0600, + adapter->rnp_dbg_adapter, + adapter, + &rnp_dbg_mbx_cookies_info_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for mbx_cookies_info failed\n"); + } else { e_dev_err("debugfs entry for %s failed\n", name); } diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c index 1702f18c2724..df5ca4ba394e 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_ethtool.c @@ -123,9 +123,9 @@ static struct rnp_reg_test reg_test_n10[] = { static bool reg_pattern_test(struct rnp_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { - u32 pat, val, before; static const u32 test_pattern[] = { 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF }; + u32 pat, val, before; for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { before = readl(adapter->hw.hw_addr + reg); @@ -177,8 +177,8 @@ static bool reg_set_and_check(struct rnp_adapter *adapter, u64 *data, **/ static bool rnp_reg_test(struct rnp_adapter *adapter, u64 *data) { - struct rnp_reg_test *test; struct rnp_hw *hw = &adapter->hw; + struct rnp_reg_test *test; u32 i; if (RNP_REMOVED(hw->hw_addr)) { @@ -243,8 +243,8 @@ static bool rnp_reg_test(struct rnp_adapter *adapter, u64 *data) static int rnp_link_test(struct rnp_adapter *adapter, u64 *data) { struct rnp_hw *hw = &adapter->hw; - bool link_up; u32 link_speed = 0; + bool link_up; bool duplex; *data = 0; @@ -259,8 +259,8 @@ void rnp_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) { struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; bool if_running = netif_running(netdev); + struct rnp_hw *hw = &adapter->hw; set_bit(__RNP_TESTING, &adapter->state); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { @@ -354,9 +354,9 @@ void rnp_diag_test(struct net_device *netdev, int rnp_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) { - int err; struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int err; err = rnp_mbx_get_lane_stat(hw); if (err) @@ -493,8 +493,8 @@ int rnp_get_ts_info(struct net_device *dev, struct 
ethtool_ts_info *info) static unsigned int rnp_max_channels(struct rnp_adapter *adapter) { - unsigned int max_combined; struct rnp_hw *hw = &adapter->hw; + unsigned int max_combined; /* SR-IOV currently only allows 2 queue on the PF */ /* dcb on max support 32 */ @@ -649,9 +649,9 @@ int rnp_get_module_info(struct net_device *dev, int rnp_get_module_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { + u16 start = eeprom->offset, length = eeprom->len; struct rnp_adapter *adapter = netdev_priv(dev); struct rnp_hw *hw = &adapter->hw; - u16 start = eeprom->offset, length = eeprom->len; int rc = 0; rnp_mbx_get_lane_stat(hw); @@ -726,9 +726,9 @@ int rnp_set_ringparam(struct net_device *netdev, struct netlink_ext_ack __always_unused *extack) { struct rnp_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; struct rnp_ring *temp_ring; int i, err = 0; - u32 new_rx_count, new_tx_count; /* sriov mode can't change ring param */ if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) @@ -838,6 +838,7 @@ int rnp_set_ringparam(struct net_device *netdev, temp_ring[i].temp_count; adapter->rx_ring[i]->reset_count = new_rx_count; + new_rx_count = temp_ring[i].temp_count; } err = rnp_setup_rx_resources(&temp_ring[i], adapter); @@ -897,8 +898,8 @@ int rnp_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) int rnp_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) { - int err; struct rnp_adapter *adapter = netdev_priv(netdev); + int err; err = rnp_mbx_get_dump(&adapter->hw, dump->flag, buffer, dump->len); @@ -945,13 +946,13 @@ int rnp_get_coalesce(struct net_device *netdev, struct rnp_adapter *adapter = netdev_priv(netdev); coal->use_adaptive_tx_coalesce = adapter->adaptive_tx_coal; - coal->tx_coalesce_usecs = adapter->tx_usecs; + coal->tx_coalesce_usecs = adapter->tx_usecs_usr_set; coal->tx_coalesce_usecs_irq = 0; coal->tx_max_coalesced_frames = adapter->tx_frames; coal->tx_max_coalesced_frames_irq = adapter->tx_work_limit; coal->use_adaptive_rx_coalesce = adapter->adaptive_rx_coal; coal->rx_coalesce_usecs_irq = 0; - coal->rx_coalesce_usecs = adapter->rx_usecs; + coal->rx_coalesce_usecs = adapter->rx_usecs_usr_set; coal->rx_max_coalesced_frames = adapter->rx_frames; coal->rx_max_coalesced_frames_irq = adapter->napi_budge; @@ -985,8 +986,8 @@ int rnp_set_coalesce(struct net_device *netdev, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { - int reset = 0; struct rnp_adapter *adapter = netdev_priv(netdev); + int reset = 0; u32 value; /* we don't support close tx and rx coalesce */ @@ -1026,6 +1027,7 @@ int rnp_set_coalesce(struct net_device *netdev, if (adapter->tx_usecs != value) { reset = 1; adapter->tx_usecs = value; + adapter->tx_usecs_usr_set = value; } if (ec->rx_max_coalesced_frames_irq < RNP_MIN_RX_WORK || @@ -1059,6 +1061,7 @@ int rnp_set_coalesce(struct net_device *netdev, if (adapter->rx_usecs != value) { reset = 1; adapter->rx_usecs = value; + adapter->rx_usecs_usr_set = value; } /* other setup is not supported */ @@ -1126,8 +1129,8 @@ static int rnp_get_ethtool_fdir_entry(struct rnp_adapter *adapter, { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; - struct hlist_node *node2; struct rnp_fdir_filter *rule = NULL; + struct hlist_node *node2; /* report total rule count */ cmd->data = adapter->fdir_pballoc; @@ -1171,7 +1174,7 @@ static int rnp_get_ethtool_fdir_entry(struct rnp_adapter *adapter, /* support proto and mask only in this mode */ fsp->h_u.ether_spec.h_proto 
= rule->filter.layer2_formate.proto; - fsp->m_u.ether_spec.h_proto = 0xffff; + fsp->m_u.ether_spec.h_proto = cpu_to_be16(0xffff); break; default: return -EINVAL; @@ -1187,10 +1190,10 @@ static int rnp_get_ethtool_fdir_entry(struct rnp_adapter *adapter, rule->filter.formatted.src_ip[0]; fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; - fsp->m_u.tcp_ip4_spec.psrc = 0xffff; - fsp->m_u.tcp_ip4_spec.pdst = 0xffff; - fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; - fsp->m_u.tcp_ip4_spec.ip4dst = 0xffffffff; + fsp->m_u.tcp_ip4_spec.psrc = cpu_to_be16(0xffff); + fsp->m_u.tcp_ip4_spec.pdst = cpu_to_be16(0xffff); + fsp->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(0xffffffff); + fsp->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(0xffffffff); } else { fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port & @@ -1241,8 +1244,8 @@ static int rnp_get_ethtool_fdir_all(struct rnp_adapter *adapter, struct ethtool_rxnfc *cmd, u32 *rule_locs) { - struct hlist_node *node2; struct rnp_fdir_filter *rule; + struct hlist_node *node2; int cnt = 0; /* report total rule count */ @@ -1270,8 +1273,8 @@ int rnp_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct rnp_adapter *adapter = netdev_priv(netdev); - int ret = -EOPNOTSUPP; struct rnp_hw *hw = &adapter->hw; + int ret = -EOPNOTSUPP; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: @@ -1351,8 +1354,8 @@ static int rnp_flowspec_to_flow_type(struct rnp_adapter *adapter, u8 *flow_type, struct rnp_fdir_filter *input) { - int i; int ret = 1; + int i; /* not support flow_ext */ if (fsp->flow_type & FLOW_EXT) return 0; @@ -1434,12 +1437,12 @@ static int rnp_flowspec_to_flow_type(struct rnp_adapter *adapter, ret = 0; } if (fsp->h_u.usr_ip4_spec.ip4src != 0 && - fsp->m_u.usr_ip4_spec.ip4src != 0xffffffff) { + fsp->m_u.usr_ip4_spec.ip4src != cpu_to_be32(0xffffffff)) { e_err(drv, "ip src mask error\n"); ret = 0; } if (fsp->h_u.usr_ip4_spec.ip4dst != 0 && - fsp->m_u.usr_ip4_spec.ip4dst != 0xffffffff) { + fsp->m_u.usr_ip4_spec.ip4dst != cpu_to_be32(0xffffffff)) { e_err(drv, "ip dst mask error\n"); ret = 0; } @@ -1467,22 +1470,22 @@ static int rnp_flowspec_to_flow_type(struct rnp_adapter *adapter, ret = 0; } if (fsp->h_u.tcp_ip4_spec.ip4src != 0 && - fsp->m_u.tcp_ip4_spec.ip4src != 0xffffffff) { + fsp->m_u.tcp_ip4_spec.ip4src != cpu_to_be32(0xffffffff)) { e_err(drv, "src mask error\n"); ret = 0; } if (fsp->h_u.tcp_ip4_spec.ip4dst != 0 && - fsp->m_u.tcp_ip4_spec.ip4dst != 0xffffffff) { + fsp->m_u.tcp_ip4_spec.ip4dst != cpu_to_be32(0xffffffff)) { e_err(drv, "dst mask error\n"); ret = 0; } if (fsp->h_u.tcp_ip4_spec.psrc != 0 && - fsp->m_u.tcp_ip4_spec.psrc != 0xffff) { + fsp->m_u.tcp_ip4_spec.psrc != cpu_to_be16(0xffff)) { e_err(drv, "src port mask error\n"); ret = 0; } if (fsp->h_u.tcp_ip4_spec.pdst != 0 && - fsp->m_u.tcp_ip4_spec.pdst != 0xffff) { + fsp->m_u.tcp_ip4_spec.pdst != cpu_to_be16(0xffff)) { e_err(drv, "src port mask error\n"); ret = 0; } @@ -1513,12 +1516,12 @@ int rnp_update_ethtool_fdir_entry(struct rnp_adapter *adapter, struct rnp_fdir_filter *input, u16 sw_idx) { + struct rnp_fdir_filter *rule, *parent; struct rnp_hw *hw = &adapter->hw; struct hlist_node *node2; - struct rnp_fdir_filter *rule, *parent; - bool deleted = false; u16 hw_idx_layer2 = 0; u16 hw_idx_tuple5 = 0; + bool deleted = false; s32 err; parent = NULL; @@ -1635,11 +1638,11 @@ static int rnp_add_ethtool_fdir_entry(struct rnp_adapter *adapter, { struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; - struct rnp_fdir_filter *input; + u32 
ring_cookie_high = fsp->ring_cookie >> 32; struct rnp_hw *hw = &adapter->hw; - int err; + struct rnp_fdir_filter *input; int vf_fix = 0; - u32 ring_cookie_high = fsp->ring_cookie >> 32; + int err; if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) vf_fix = 1; @@ -1705,9 +1708,9 @@ static int rnp_add_ethtool_fdir_entry(struct rnp_adapter *adapter, input->filter.formatted.dst_ip_mask[0] = fsp->m_u.usr_ip4_spec.ip4dst; input->filter.formatted.src_port = 0; - input->filter.formatted.src_port_mask = 0xffff; + input->filter.formatted.src_port_mask = cpu_to_be16(0xffff); input->filter.formatted.dst_port = 0; - input->filter.formatted.dst_port_mask = 0xffff; + input->filter.formatted.dst_port_mask = cpu_to_be16(0xffff); input->filter.formatted.inner_mac[0] = fsp->h_u.usr_ip4_spec.proto; input->filter.formatted.inner_mac_mask[0] = @@ -1904,9 +1907,10 @@ int rnp_set_rxfh(struct net_device *netdev, const u32 *indir, { struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + u32 reta_entries; int i; - u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + reta_entries = rnp_rss_indir_tbl_entries(adapter); if (hfunc) { if (hw->ops.set_rss_hfunc) { if (hw->ops.set_rss_hfunc(hw, hfunc)) diff --git a/drivers/net/ethernet/mucse/rnp/rnp_lib.c b/drivers/net/ethernet/mucse/rnp/rnp_lib.c index 771b8ea6cfe7..7c8691532a11 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_lib.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_lib.c @@ -41,12 +41,12 @@ static bool rnp_cache_ring_dcb_sriov(struct rnp_adapter *adapter) static bool rnp_cache_ring_dcb(struct rnp_adapter *adapter) { struct net_device *dev = adapter->netdev; - unsigned int tx_idx, rx_idx; - int tc, offset, rss_i, i, step; u8 num_tcs = netdev_get_num_tc(dev); - struct rnp_ring *ring; struct rnp_hw *hw = &adapter->hw; struct rnp_dma_info *dma = &hw->dma; + int tc, offset, rss_i, i, step; + unsigned int tx_idx, rx_idx; + struct rnp_ring *ring; /* verify we have DCB queueing enabled before proceeding */ if (num_tcs <= 1) @@ -121,12 +121,11 @@ static bool rnp_cache_ring_sriov(struct rnp_adapter *adapter) **/ static bool rnp_cache_ring_rss(struct rnp_adapter *adapter) { - int i; - /* setup here */ - int ring_step = 1; - struct rnp_ring *ring; struct rnp_hw *hw = &adapter->hw; struct rnp_dma_info *dma = &hw->dma; + struct rnp_ring *ring; + int ring_step = 1; + int i; /* n400 use 0 4 8 c */ if (hw->hw_type == rnp_hw_n400) @@ -208,10 +207,10 @@ static void rnp_cache_ring_register(struct rnp_adapter *adapter) **/ static bool rnp_set_dcb_sriov_queues(struct rnp_adapter *adapter) { - int i; u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; - u16 vmdq_m = 0; u8 tcs = netdev_get_num_tc(adapter->netdev); + u16 vmdq_m = 0; + int i; /* verify we have DCB queueing enabled before proceeding */ if (tcs <= 1) @@ -316,10 +315,10 @@ static bool rnp_set_dcb_queues(struct rnp_adapter *adapter) **/ static bool rnp_set_sriov_queues(struct rnp_adapter *adapter) { - u16 vmdq_m = 0; u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; u16 rss_m = RNP_RSS_DISABLED_MASK; struct rnp_hw *hw = &adapter->hw; + u16 vmdq_m = 0; /* only proceed if SR-IOV is enabled */ if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) @@ -491,16 +490,14 @@ static inline void rnp_irq_disable_queues(struct rnp_q_vector *q_vector) static enum hrtimer_restart irq_miss_check(struct hrtimer *hrtimer) { + struct rnp_tx_buffer *tx_buffer; struct rnp_q_vector *q_vector; - struct rnp_ring *ring; struct rnp_tx_desc *eop_desc; struct rnp_adapter *adapter; - + union rnp_rx_desc *rx_desc; + struct 
rnp_ring *ring; int tx_next_to_clean; int tx_next_to_use; - - struct rnp_tx_buffer *tx_buffer; - union rnp_rx_desc *rx_desc; int size; q_vector = container_of(hrtimer, struct rnp_q_vector, @@ -577,16 +574,16 @@ static int rnp_alloc_q_vector(struct rnp_adapter *adapter, int eth_queue_idx, int v_idx, int r_idx, int r_count, int step) { - struct rnp_q_vector *q_vector; - struct rnp_ring *ring; + int rxr_idx = r_idx, txr_idx = r_idx; struct rnp_hw *hw = &adapter->hw; struct rnp_dma_info *dma = &hw->dma; + int txr_count, rxr_count, idx; + struct rnp_q_vector *q_vector; int node = NUMA_NO_NODE; - int cpu = -1; + struct rnp_ring *ring; int ring_count, size; - int txr_count, rxr_count, idx; - int rxr_idx = r_idx, txr_idx = r_idx; int cpu_offset = 0; + int cpu = -1; rxr_count = r_count; txr_count = rxr_count; @@ -721,7 +718,6 @@ static int rnp_alloc_q_vector(struct rnp_adapter *adapter, ring++; } if (hw->hw_type == rnp_hw_n10 || hw->hw_type == rnp_hw_n400) { - q_vector->vector_flags |= RNP_QVECTOR_FLAG_IRQ_MISS_CHECK; /* initialize timer */ q_vector->irq_check_usecs = 1000; hrtimer_init(&q_vector->irq_miss_check_timer, @@ -760,8 +756,7 @@ static void rnp_free_q_vector(struct rnp_adapter *adapter, int v_idx) adapter->q_vector[v_idx] = NULL; netif_napi_del(&q_vector->napi); - if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) - hrtimer_cancel(&q_vector->irq_miss_check_timer); + hrtimer_cancel(&q_vector->irq_miss_check_timer); /* rnp_get_stats64() might access the rings on this vector, * we must wait a grace period before freeing it. @@ -778,14 +773,14 @@ static void rnp_free_q_vector(struct rnp_adapter *adapter, int v_idx) **/ static int rnp_alloc_q_vectors(struct rnp_adapter *adapter) { - int v_idx = adapter->q_vector_off; - int ring_idx = 0; - int r_remaing = - min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); - int ring_step = 1; int err, ring_cnt, v_remaing = adapter->num_q_vectors; - int q_vector_nums = 0; + int r_remaing = min_t(int, adapter->num_tx_queues, + adapter->num_rx_queues); + int v_idx = adapter->q_vector_off; struct rnp_hw *hw = &adapter->hw; + int q_vector_nums = 0; + int ring_step = 1; + int ring_idx = 0; if (adapter->flags & RNP_FLAG_SRIOV_ENABLED) { ring_idx = 0; @@ -897,9 +892,9 @@ static void rnp_reset_interrupt_capability(struct rnp_adapter *adapter) **/ static int rnp_set_interrupt_capability(struct rnp_adapter *adapter) { + int irq_mode_back = adapter->irq_mode; struct rnp_hw *hw = &adapter->hw; int vector, v_budget, err = 0; - int irq_mode_back = adapter->irq_mode; v_budget = min_t(int, adapter->num_tx_queues, adapter->num_rx_queues); @@ -971,9 +966,9 @@ static int rnp_set_interrupt_capability(struct rnp_adapter *adapter) static void rnp_print_ring_info(struct rnp_adapter *adapter) { - int i; - struct rnp_ring *ring; struct rnp_q_vector *q_vector; + struct rnp_ring *ring; + int i; rnp_dbg("tx_queue count %d\n", adapter->num_tx_queues); rnp_dbg("queue-mapping :\n"); @@ -1079,9 +1074,9 @@ void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, u32 inner_vlan_tunnel_len, int ignore_vlan, bool crc_pad) { + struct rnp_adapter *adapter = RING2ADAPT(tx_ring); struct rnp_tx_ctx_desc *context_desc; u16 i = tx_ring->next_to_use; - struct rnp_adapter *adapter = RING2ADAPT(tx_ring); u32 type_tucmd = 0; context_desc = RNP_TX_CTXTDESC(tx_ring, i); @@ -1119,7 +1114,7 @@ void rnp_tx_ctxtdesc(struct rnp_ring *tx_ring, u32 mss_len_vf_num, if (tx_ring->q_vector->adapter->flags & RNP_FLAG_SRIOV_ENABLED) { if (ignore_vlan) context_desc->inner_vlan_tunnel_len |= - 
VF_VEB_IGNORE_VLAN; + cpu_to_le32(VF_VEB_IGNORE_VLAN); } buf_dump_line("ctx ", __LINE__, context_desc, sizeof(*context_desc)); @@ -1140,9 +1135,9 @@ void rnp_store_reta(struct rnp_adapter *adapter) { u32 i, reta_entries = rnp_rss_indir_tbl_entries(adapter); struct rnp_hw *hw = &adapter->hw; - u32 reta = 0; /* relative with rss table */ struct rnp_ring *rx_ring; + u32 reta = 0; /* Write redirection table to HW */ for (i = 0; i < reta_entries; i++) { @@ -1160,16 +1155,16 @@ void rnp_store_reta(struct rnp_adapter *adapter) void rnp_store_key(struct rnp_adapter *adapter) { - struct rnp_hw *hw = &adapter->hw; bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + struct rnp_hw *hw = &adapter->hw; hw->ops.set_rss_key(hw, sriov_flag); } int rnp_init_rss_key(struct rnp_adapter *adapter) { - struct rnp_hw *hw = &adapter->hw; bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + struct rnp_hw *hw = &adapter->hw; /* only init rss key once */ /* no change rss key if user input one */ @@ -1184,12 +1179,12 @@ int rnp_init_rss_key(struct rnp_adapter *adapter) int rnp_init_rss_table(struct rnp_adapter *adapter) { + u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); int rx_nums = adapter->num_rx_queues; - int i, j; struct rnp_hw *hw = &adapter->hw; struct rnp_ring *rx_ring; u32 reta = 0; - u32 reta_entries = rnp_rss_indir_tbl_entries(adapter); + int i, j; if (adapter->flags & RNP_FLAG_DCB_ENABLED) { rx_nums = rx_nums / adapter->num_tc; @@ -1278,8 +1273,8 @@ s32 rnp_fdir_erase_perfect_filter(int fdir_mode, struct rnp_hw *hw, u32 rnp_tx_desc_unused_sw(struct rnp_ring *tx_ring) { - u16 ntu = tx_ring->next_to_use; u16 ntc = tx_ring->next_to_clean; + u16 ntu = tx_ring->next_to_use; u16 count = tx_ring->count; return ((ntu >= ntc) ? (count - ntu + ntc) : (ntc - ntu)); @@ -1306,9 +1301,9 @@ u32 rnp_tx_desc_unused_hw(struct rnp_hw *hw, struct rnp_ring *tx_ring) s32 rnp_disable_rxr_maxrate(struct net_device *netdev, u8 queue_index) { struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; u32 reg_idx = rx_ring->rnp_queue_idx; + struct rnp_hw *hw = &adapter->hw; /* disable which dma ring in maxrate limit mode */ wr32(hw, RNP_SELECT_RING_EN(reg_idx), 0); @@ -1322,9 +1317,9 @@ s32 rnp_enable_rxr_maxrate(struct net_device *netdev, u8 queue_index, u32 maxrate) { struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; struct rnp_ring *rx_ring = adapter->rx_ring[queue_index]; u32 reg_idx = rx_ring->rnp_queue_idx; + struct rnp_hw *hw = &adapter->hw; u32 real_rate = maxrate / 16; if (!real_rate) diff --git a/drivers/net/ethernet/mucse/rnp/rnp_main.c b/drivers/net/ethernet/mucse/rnp/rnp_main.c index 70fa64b0b9ad..8b051b970654 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_main.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_main.c @@ -45,10 +45,10 @@ char rnp_driver_name[] = "rnp"; static const char rnp_driver_string[] = "mucse 1/10/25/40 Gigabit PCI Express Network Driver"; -#define DRV_VERSION "0.3.8" +#define DRV_VERSION "1.0.0" #include "version.h" -const char rnp_driver_version[] = DRV_VERSION GIT_COMMIT; +const char rnp_driver_version[] = DRV_VERSION; static const char rnp_copyright[] = "Copyright (c) 2020-2023 mucse Corporation."; @@ -56,7 +56,6 @@ static struct rnp_info *rnp_info_tbl[] = { [board_n10] = &rnp_n10_info, [board_n400] = &rnp_n400_info, }; - static int register_mbx_irq(struct rnp_adapter *adapter); static void remove_mbx_irq(struct rnp_adapter *adapter); @@ -210,9 
+209,9 @@ static u64 rnp_get_tx_pending(struct rnp_ring *ring) static inline bool rnp_check_tx_hang(struct rnp_ring *tx_ring) { - u32 tx_done = rnp_get_tx_completed(tx_ring); u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = rnp_get_tx_pending(tx_ring); + u32 tx_done = rnp_get_tx_completed(tx_ring); bool ret = false; clear_check_for_tx_hang(tx_ring); @@ -286,11 +285,11 @@ static bool rnp_clean_tx_irq(struct rnp_q_vector *q_vector, struct rnp_ring *tx_ring, int napi_budget) { struct rnp_adapter *adapter = q_vector->adapter; - struct rnp_tx_buffer *tx_buffer; - struct rnp_tx_desc *tx_desc; u64 total_bytes = 0, total_packets = 0; int budget = q_vector->tx.work_limit; + struct rnp_tx_buffer *tx_buffer; int i = tx_ring->next_to_clean; + struct rnp_tx_desc *tx_desc; if (test_bit(__RNP_DOWN, &adapter->state)) return true; @@ -407,7 +406,7 @@ static inline void rnp_rx_hash(struct rnp_ring *ring, if (!(ring->netdev->features & NETIF_F_RXHASH)) return; #define RNP_RSS_TYPE_MASK 0xc0 - rss_type = rx_desc->wb.cmd & RNP_RSS_TYPE_MASK; + rss_type = le16_to_cpu(rx_desc->wb.cmd) & RNP_RSS_TYPE_MASK; skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash), rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } @@ -431,7 +430,7 @@ static inline void rnp_rx_checksum(struct rnp_ring *ring, if (!(ring->ring_flags & RNP_RING_NO_TUNNEL_SUPPORT)) { if (rnp_get_stat(rx_desc, RNP_RXD_STAT_TUNNEL_MASK) == - RNP_RXD_STAT_TUNNEL_VXLAN) { + cpu_to_le16(RNP_RXD_STAT_TUNNEL_VXLAN)) { encap_pkt = true; skb->encapsulation = 1; skb->ip_summed = CHECKSUM_NONE; @@ -526,7 +525,9 @@ static void rnp_process_skb_fields(struct rnp_ring *rx_ring, struct sk_buff *skb) { struct net_device *dev = rx_ring->netdev; - struct rnp_adapter *adapter = netdev_priv(dev); + struct rnp_adapter *adapter; + + adapter = netdev_priv(dev); rnp_rx_hash(rx_ring, rx_desc, skb); @@ -598,7 +599,7 @@ static bool rnp_check_csum_error(struct rnp_ring *rx_ring, /* we should ignore l4 csum error */ if (unlikely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_L4_MASK) && - (!(rx_desc->wb.rev1 & + (!(le16_to_cpu(rx_desc->wb.rev1) & RNP_RX_L3_TYPE_MASK)))) { rx_ring->rx_stats.csum_err--; goto skip_fix; @@ -607,8 +608,9 @@ static bool rnp_check_csum_error(struct rnp_ring *rx_ring, if (unlikely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_SCTP_MASK))) { /* sctp mask only valid if size > 60 and with ipv4 */ - if (size > 60 && (rx_desc->wb.rev1 & - RNP_RX_L3_TYPE_MASK)) { + if (size > 60 && + (le16_to_cpu(rx_desc->wb.rev1) & + RNP_RX_L3_TYPE_MASK)) { err = true; } else { /* sctp less than 60 hw report err by mistake */ @@ -643,6 +645,8 @@ static bool rnp_check_csum_error(struct rnp_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, DMA_FROM_DEVICE); + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; #if (PAGE_SIZE < 8192) rx_buffer->page_offset ^= truesize; #else @@ -688,6 +692,7 @@ static int rnp_rx_ring_reinit(struct rnp_adapter *adapter, err = rnp_setup_rx_resources(temp_ring, adapter); if (err) { rnp_free_rx_resources(temp_ring); + vfree(temp_ring); goto err_setup; } rnp_free_rx_resources(rx_ring); @@ -720,6 +725,8 @@ static inline unsigned int rnp_rx_offset(struct rnp_ring *rx_ring) static unsigned int rnp_get_headlen(unsigned char *data, unsigned int max_len) { + __be16 protocol; + u8 nexthdr = 0; /* default to not TCP */ union { unsigned char *network; /* l2 headers */ @@ -729,8 +736,6 @@ static unsigned int rnp_get_headlen(unsigned char *data, struct iphdr *ipv4; struct ipv6hdr *ipv6; 
} hdr; - __be16 protocol; - u8 nexthdr = 0; /* default to not TCP */ u8 hlen; /* this should never happen, but better safe than sorry */ @@ -823,7 +828,8 @@ static inline bool rnp_page_is_reserved(struct page *page) page_is_pfmemalloc(page); } -static bool rnp_can_reuse_rx_page(struct rnp_rx_buffer *rx_buffer) +static bool rnp_can_reuse_rx_page(struct rnp_rx_buffer *rx_buffer, + int size) { unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; struct page *page = rx_buffer->page; @@ -847,7 +853,7 @@ static bool rnp_can_reuse_rx_page(struct rnp_rx_buffer *rx_buffer) * still less than one buffer in size. */ #define RNP_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - RNP_RXBUFFER_2K) - if (rx_buffer->page_offset > RNP_LAST_OFFSET) + if (rx_buffer->page_offset > (RNP_LAST_OFFSET - size)) return false; #endif @@ -873,8 +879,8 @@ static bool rnp_can_reuse_rx_page(struct rnp_rx_buffer *rx_buffer) static void rnp_reuse_rx_page(struct rnp_ring *rx_ring, struct rnp_rx_buffer *old_buff) { - struct rnp_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; + struct rnp_rx_buffer *new_buff; new_buff = &rx_ring->rx_buffer_info[nta]; @@ -934,8 +940,8 @@ static bool rnp_check_src_mac(struct sk_buff *skb, struct net_device *netdev) { char *data = (char *)skb->data; - bool ret = false; struct netdev_hw_addr *ha; + bool ret = false; if (is_multicast_ether_addr(data)) { if (memcmp(data + netdev->addr_len, netdev->dev_addr, @@ -982,7 +988,9 @@ static bool rnp_cleanup_headers(struct rnp_ring __maybe_unused *rx_ring, struct sk_buff *skb) { struct net_device *netdev = rx_ring->netdev; - struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_adapter *adapter; + + adapter = netdev_priv(netdev); #ifndef OPTM_WITH_LARGE /* XDP packets use error pointer so abort at this point */ if (IS_ERR(skb)) @@ -1011,10 +1019,10 @@ static bool rnp_cleanup_headers(struct rnp_ring __maybe_unused *rx_ring, **/ void rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) { + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + u16 i = rx_ring->next_to_use; union rnp_rx_desc *rx_desc; struct rnp_rx_buffer *bi; - u16 i = rx_ring->next_to_use; - u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); u16 bufsz; /* nothing to do */ if (!cleaned_count) @@ -1131,6 +1139,8 @@ static bool rnp_is_non_eop(struct rnp_ring *rx_ring, if (likely(rnp_test_staterr(rx_desc, RNP_RXD_STAT_EOP))) return false; /* place skb in next buffer to be received */ + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; return true; } @@ -1213,7 +1223,11 @@ static struct rnp_rx_buffer *rnp_get_rx_buffer(struct rnp_ring *rx_ring, static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, struct rnp_rx_buffer *rx_buffer) { - if (rnp_can_reuse_rx_page(rx_buffer)) { + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { /* hand second half of page back to the ring */ rnp_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -1317,11 +1331,11 @@ static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, struct rnp_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; - unsigned int err_packets = 0; - unsigned int driver_drop_packets = 0; - struct sk_buff *skb = rx_ring->skb; struct rnp_adapter *adapter = q_vector->adapter; u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + unsigned int driver_drop_packets = 0; + struct sk_buff *skb = rx_ring->skb; + unsigned int 
err_packets = 0; while (likely(total_rx_packets < budget)) { union rnp_rx_desc *rx_desc; @@ -1427,6 +1441,8 @@ static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, /* populate checksum, timestamp, VLAN, and protocol */ rnp_process_skb_fields(rx_ring, rx_desc, skb); + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; rnp_rx_skb(q_vector, skb); skb = NULL; @@ -1464,10 +1480,10 @@ static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, **/ void rnp_alloc_rx_buffers(struct rnp_ring *rx_ring, u16 cleaned_count) { + u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); + u16 i = rx_ring->next_to_use; union rnp_rx_desc *rx_desc; struct rnp_rx_buffer *bi; - u16 i = rx_ring->next_to_use; - u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24)); u16 bufsz; /* nothing to do */ if (!cleaned_count) @@ -1619,7 +1635,11 @@ static void rnp_put_rx_buffer(struct rnp_ring *rx_ring, struct rnp_rx_buffer *rx_buffer, struct sk_buff *skb) { - if (rnp_can_reuse_rx_page(rx_buffer)) { + struct rnp_q_vector *q_vector = rx_ring->q_vector; + struct rnp_adapter *adapter = q_vector->adapter; + struct rnp_hw *hw = &adapter->hw; + + if (rnp_can_reuse_rx_page(rx_buffer, hw->dma_split_size)) { /* hand second half of page back to the ring */ rnp_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -1642,13 +1662,13 @@ static struct sk_buff *rnp_construct_skb(struct rnp_ring *rx_ring, struct xdp_buff *xdp, union rnp_rx_desc *rx_desc) { - unsigned int size = xdp->data_end - xdp->data; #if (PAGE_SIZE < 8192) unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); #endif + unsigned int size = xdp->data_end - xdp->data; struct sk_buff *skb; /* prefetch first cache line of first page */ @@ -1681,8 +1701,6 @@ static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, struct xdp_buff *xdp, union rnp_rx_desc *rx_desc) { - unsigned int metasize = xdp->data - xdp->data_meta; - void *va = xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = rnp_rx_pg_size(rx_ring) / 2; #else @@ -1690,6 +1708,8 @@ static struct sk_buff *rnp_build_skb(struct rnp_ring *rx_ring, SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + SKB_DATA_ALIGN(xdp->data_end - xdp->data_hard_start); #endif + unsigned int metasize = xdp->data - xdp->data_meta; + void *va = xdp->data_meta; struct sk_buff *skb; /* prefetch first cache line of first page */ @@ -1754,10 +1774,10 @@ static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, struct rnp_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; - unsigned int err_packets = 0; - unsigned int driver_drop_packets = 0; struct rnp_adapter *adapter = q_vector->adapter; u16 cleaned_count = rnp_desc_unused_rx(rx_ring); + unsigned int driver_drop_packets = 0; + unsigned int err_packets = 0; struct xdp_buff xdp; xdp.data = NULL; @@ -1877,8 +1897,12 @@ static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, continue; /* verify the packet layout is correct */ - if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) + if (rnp_cleanup_headers(rx_ring, rx_desc, skb)) { + skb = NULL; + /* we should clean it since we used all info in it */ + rx_desc->wb.cmd = 0; continue; + } /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -1926,8 +1950,8 @@ static int rnp_clean_rx_irq(struct rnp_q_vector *q_vector, static void rnp_pull_tail(struct sk_buff *skb) { skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; unsigned int pull_len; + unsigned char *va; /* 
it is valid to use page_address instead of kmap since we are * working with pages allocated out of the lomem pool per @@ -2048,8 +2072,7 @@ static irqreturn_t rnp_msix_clean_rings(int irq, void *data) struct rnp_q_vector *q_vector = data; /* disable the hrtimer first */ - if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) - rnp_htimer_stop(q_vector); + rnp_htimer_stop(q_vector); /* disabled interrupts (on this vector) for us */ rnp_irq_disable_queues(q_vector); @@ -2063,10 +2086,15 @@ static irqreturn_t rnp_msix_clean_rings(int irq, void *data) static void update_rx_count(int cleaned, struct rnp_q_vector *q_vector) { struct rnp_adapter *adapter = q_vector->adapter; + u32 link_speed = adapter->link_speed; + struct rnp_ring *ring; if (!cleaned || cleaned == q_vector->new_rx_count) return; + if (link_speed != RNP_LINK_SPEED_10GB_FULL) + goto speed_1gb; + if (cleaned < 5) { q_vector->small_times = 0; q_vector->large_times = 0; @@ -2125,6 +2153,15 @@ static void update_rx_count(int cleaned, struct rnp_q_vector *q_vector) q_vector->small_times = 0; q_vector->large_times = 0; } + return; + +speed_1gb: + rnp_for_each_ring(ring, q_vector->rx) { + if (ring->ring_flags & RNP_RING_LOWER_ITR) + q_vector->new_rx_count = 1; + else + q_vector->new_rx_count = 32; + } } /** @@ -2139,10 +2176,10 @@ int rnp_poll(struct napi_struct *napi, int budget) struct rnp_q_vector *q_vector = container_of(napi, struct rnp_q_vector, napi); struct rnp_adapter *adapter = q_vector->adapter; - struct rnp_ring *ring; int per_ring_budget, work_done = 0; bool clean_complete = true; int cleaned_total = 0; + struct rnp_ring *ring; rnp_for_each_ring(ring, q_vector->tx) { if (!rnp_clean_tx_irq(q_vector, ring, budget)) @@ -2225,9 +2262,7 @@ int rnp_poll(struct napi_struct *napi, int budget) } } if (!test_bit(__RNP_DOWN, &adapter->state)) { - if (q_vector->vector_flags & - RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) - rnp_htimer_start(q_vector); + rnp_htimer_start(q_vector); /* Return budget-1 so that polling stops */ return budget - 1; } @@ -2262,16 +2297,10 @@ int rnp_poll(struct napi_struct *napi, int budget) q_vector->new_rx_count; } } + rnp_htimer_start(q_vector); } } - if (!test_bit(__RNP_DOWN, &adapter->state)) { - if (q_vector->vector_flags & - RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) - if (!test_bit(__RNP_DOWN, &adapter->state)) - rnp_htimer_start(q_vector); - } - return min(work_done, budget - 1); } @@ -2307,10 +2336,11 @@ static void rnp_irq_affinity_release(struct kref *ref) static irqreturn_t rnp_intr(int irq, void *data) { struct rnp_adapter *adapter = data; - struct rnp_q_vector *q_vector = adapter->q_vector[0]; + struct rnp_q_vector *q_vector; + + q_vector = adapter->q_vector[0]; /* in this mode only 1 q_vector is used */ - if (q_vector->vector_flags & RNP_QVECTOR_FLAG_IRQ_MISS_CHECK) - rnp_htimer_stop(q_vector); + rnp_htimer_stop(q_vector); /* disabled interrupts (on this vector) for us */ rnp_irq_disable_queues(q_vector); @@ -2334,8 +2364,8 @@ static irqreturn_t rnp_intr(int irq, void *data) static int rnp_request_msix_irqs(struct rnp_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int err; int i = 0, m; + int err; DPRINTK(IFUP, INFO, "[%s] num_q_vectors:%d\n", __func__, adapter->num_q_vectors); @@ -2505,8 +2535,8 @@ int rnp_setup_tx_maxrate(struct rnp_ring *tx_ring, u64 max_rate, static int rnp_tx_maxrate_own(struct rnp_adapter *adapter, int queue_index) { struct rnp_ring *tx_ring = adapter->tx_ring[queue_index]; - u64 real_rate = 0; u32 maxrate = adapter->max_rate[queue_index]; + u64 real_rate = 0; if 
(!maxrate) return rnp_setup_tx_maxrate(tx_ring, 0, @@ -2593,10 +2623,11 @@ void rnp_configure_tx_ring(struct rnp_adapter *adapter, **/ static void rnp_configure_tx(struct rnp_adapter *adapter) { - u32 i, dma_axi_ctl; struct rnp_hw *hw = &adapter->hw; - struct rnp_dma_info *dma = &hw->dma; + struct rnp_dma_info *dma; + u32 i, dma_axi_ctl; + dma = &hw->dma; /* dma_axi_en.tx_en must be before Tx queues are enabled */ dma_axi_ctl = dma_rd32(dma, RNP_DMA_AXI_EN); dma_axi_ctl |= TX_AXI_RW_EN; @@ -2617,8 +2648,8 @@ void rnp_configure_rx_ring(struct rnp_adapter *adapter, struct rnp_ring *ring) { struct rnp_hw *hw = &adapter->hw; - u64 desc_phy = ring->dma; u16 q_idx = ring->queue_index; + u64 desc_phy = ring->dma; /* disable queue to avoid issues while updating state */ rnp_disable_rx_queue(adapter, ring); @@ -2678,11 +2709,13 @@ void rnp_configure_rx_ring(struct rnp_adapter *adapter, static void rnp_configure_virtualization(struct rnp_adapter *adapter) { struct rnp_hw *hw = &adapter->hw; - struct rnp_dma_info *dma = &hw->dma; - u32 ring, vfnum; + struct rnp_dma_info *dma; u64 real_rate = 0; + u32 ring, vfnum; int i, vf_ring; + dma = &hw->dma; + if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { hw->ops.set_sriov_status(hw, false); return; @@ -2719,10 +2752,11 @@ static void rnp_configure_virtualization(struct rnp_adapter *adapter) static void rnp_set_rx_buffer_len(struct rnp_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3; struct rnp_ring *rx_ring; + int max_frame; int i; + max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN * 3; if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN); @@ -2753,12 +2787,11 @@ static void rnp_set_rx_buffer_len(struct rnp_adapter *adapter) static void rnp_configure_rx(struct rnp_adapter *adapter) { struct rnp_hw *hw = &adapter->hw; - struct rnp_dma_info *dma = &hw->dma; - int i; + struct rnp_dma_info *dma; u32 dma_axi_ctl; + int i; - /* disable receives while setting up the descriptors */ - + dma = &hw->dma; /* set_rx_buffer_len must be called before ring initialization */ rnp_set_rx_buffer_len(adapter); @@ -2785,8 +2818,9 @@ static int rnp_vlan_rx_add_vid(struct net_device *netdev, struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; bool veb_setup = true; - bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + bool sriov_flag; + sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); if (sriov_flag) { /* in sriov mode */ if ((vid) && adapter->vf_vlan && @@ -2826,7 +2860,7 @@ static int rnp_vlan_rx_add_vid(struct net_device *netdev, } } /* only ctags setup veb if in sriov and not stags */ - if (hw->ops.set_vlan_filter) { + if (vid && hw->ops.set_vlan_filter) { hw->ops.set_vlan_filter(hw, vid, true, (sriov_flag && veb_setup)); } @@ -2839,14 +2873,15 @@ static int rnp_vlan_rx_kill_vid(struct net_device *netdev, { struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - struct rnp_eth_info *eth = &hw->eth; - int i; - bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + struct rnp_eth_info *eth; bool veb_setup = true; + bool sriov_flag; + int i; if (!vid) return 0; + sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); if (sriov_flag) { int true_remove = 1; @@ -2899,6 +2934,7 @@ static int rnp_vlan_rx_kill_vid(struct net_device *netdev, } } skip_setup: + eth = &hw->eth; /* need set ncsi vfta again */ if (hw->ncsi_en) eth->ops.ncsi_set_vfta(eth); @@ -2919,9 +2955,9 @@ static 
int rnp_vlan_rx_kill_vid(struct net_device *netdev, */ static void rnp_vlan_strip_disable(struct rnp_adapter *adapter) { - int i; - struct rnp_ring *tx_ring; struct rnp_hw *hw = &adapter->hw; + struct rnp_ring *tx_ring; + int i; for (i = 0; i < adapter->num_rx_queues; i++) { tx_ring = adapter->rx_ring[i]; @@ -2953,11 +2989,12 @@ static void rnp_remove_vlan(struct rnp_adapter *adapter) static void rnp_restore_vlan(struct rnp_adapter *adapter) { - u16 vid; struct rnp_hw *hw = &adapter->hw; - struct rnp_eth_info *eth = &hw->eth; + struct rnp_eth_info *eth; + u16 vid; int i; + eth = &hw->eth; /* in stags open, set stags_vid to vlan filter */ if (adapter->flags2 & RNP_FLAG2_VLAN_STAGS_ENABLED) eth->ops.set_vfta(eth, adapter->stags_vid, true); @@ -3066,8 +3103,8 @@ static const struct udp_tunnel_nic_info rnp_udp_tunnels_n10 = { static void rnp_fdir_filter_restore(struct rnp_adapter *adapter) { struct rnp_hw *hw = &adapter->hw; - struct hlist_node *node2; struct rnp_fdir_filter *filter; + struct hlist_node *node2; spin_lock(&adapter->fdir_perfect_lock); @@ -3080,16 +3117,33 @@ static void rnp_fdir_filter_restore(struct rnp_adapter *adapter) /* setup ntuple */ hlist_for_each_entry_safe(filter, node2, - &adapter->fdir_filter_list, fdir_node) { - rnp_fdir_write_perfect_filter( - adapter->fdir_mode, hw, &filter->filter, - filter->hw_idx, - (filter->action == RNP_FDIR_DROP_QUEUE) ? - RNP_FDIR_DROP_QUEUE : - adapter->rx_ring[filter->action]->rnp_queue_idx, - (adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO) ? - true : - false); + &adapter->fdir_filter_list, + fdir_node) { + bool drop = filter->action == RNP_FDIR_DROP_QUEUE; + bool prio = adapter->priv_flags & RNP_PRIV_FLAG_REMAP_PRIO; + + if (!filter->vf_num && + filter->action != ACTION_TO_MPE) { + int idx = adapter->rx_ring[filter->action]->rnp_queue_idx; + + rnp_fdir_write_perfect_filter(adapter->fdir_mode, hw, + &filter->filter, + filter->hw_idx, + drop ? + RNP_FDIR_DROP_QUEUE : + idx, + prio ? true : + false); + } else { + rnp_fdir_write_perfect_filter(adapter->fdir_mode, hw, + &filter->filter, + filter->hw_idx, + drop ? + RNP_FDIR_DROP_QUEUE : + filter->action, + prio ? 
true : + false); + } } spin_unlock(&adapter->fdir_perfect_lock); @@ -3115,11 +3169,11 @@ static void rnp_vlan_stags_flag(struct rnp_adapter *adapter) static void rnp_configure(struct rnp_adapter *adapter) { - struct rnp_hw *hw = &adapter->hw; bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); #if (PAGE_SIZE < 8192) struct rnp_ring *rx_ring = adapter->rx_ring[0]; #endif + struct rnp_hw *hw = &adapter->hw; /* We must restore virtualization before VLANs or else * the VLVF registers will not be populated @@ -3139,6 +3193,16 @@ static void rnp_configure(struct rnp_adapter *adapter) hw->dma_split_size = rnp_rx_pg_size(rx_ring) / 2 - rnp_rx_offset(rx_ring) - sizeof(struct skb_shared_info); +#else + /* if mtu more than this */ + hw->dma_split_size = SKB_WITH_OVERHEAD(PAGE_SIZE) - RNP_SKB_PAD; + + if (hw->max_length_current >= 1536) + hw->dma_split_size = min_t(int, hw->dma_split_size, hw->max_length_current); + else + hw->dma_split_size = 1536; + /* up to 16-asign */ + hw->dma_split_size = (hw->dma_split_size + 15) & (~0xf); #endif hw->ops.update_hw_info(hw); @@ -3216,6 +3280,10 @@ static void rnp_up_complete(struct rnp_adapter *adapter) mod_timer(&adapter->service_timer, jiffies); hw->link = 0; + + if (hw->saved_force_link_speed != RNP_LINK_SPEED_UNKNOWN) + rnp_mbx_force_speed(hw, hw->saved_force_link_speed); + hw->ops.set_mbx_link_event(hw, 1); hw->ops.set_mbx_ifup(hw, 1); } @@ -3249,9 +3317,9 @@ void rnp_up(struct rnp_adapter *adapter) void rnp_reset(struct rnp_adapter *adapter) { + bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); struct rnp_hw *hw = &adapter->hw; int err; - bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); /* lock SFP init bit to prevent race conditions with the watchdog */ while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) @@ -3288,8 +3356,8 @@ void rnp_reset(struct rnp_adapter *adapter) **/ static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) { - u16 i = rx_ring->next_to_clean; struct rnp_rx_buffer *rx_buffer; + u16 i = rx_ring->next_to_clean; if (!rx_ring->rx_buffer_info) return; @@ -3391,15 +3459,16 @@ static void rnp_clean_rx_ring(struct rnp_ring *rx_ring) **/ static void rnp_clean_tx_ring(struct rnp_ring *tx_ring) { - unsigned long size; + struct rnp_tx_buffer *tx_buffer; u16 i = tx_ring->next_to_clean; - struct rnp_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; + unsigned long size; BUG_ON(!tx_ring); /* ring already cleared, nothing to do */ if (!tx_ring->tx_buffer_info) return; + tx_buffer = &tx_ring->tx_buffer_info[i]; while (i != tx_ring->next_to_use) { struct rnp_tx_desc *eop_desc, *tx_desc; @@ -3477,9 +3546,9 @@ static void rnp_clean_all_tx_rings(struct rnp_adapter *adapter) static void rnp_fdir_filter_exit(struct rnp_adapter *adapter) { - struct hlist_node *node2; - struct rnp_fdir_filter *filter; struct rnp_hw *hw = &adapter->hw; + struct rnp_fdir_filter *filter; + struct hlist_node *node2; spin_lock(&adapter->fdir_perfect_lock); @@ -3527,19 +3596,24 @@ static int rnp_xmit_nop_frame_ring(struct rnp_adapter *adapter, void rnp_down(struct rnp_adapter *adapter) { + bool is_pci_dead = pci_channel_offline(adapter->pdev); struct net_device *netdev = adapter->netdev; + bool is_pci_online = !is_pci_dead; struct rnp_hw *hw = &adapter->hw; - int i; int free_tx_ealay = 0; int err = 0; - bool is_pci_dead = pci_channel_offline(adapter->pdev); - bool is_pci_online = !is_pci_dead; + int i; /* signal that we are down to the interrupt handler */ set_bit(__RNP_DOWN, &adapter->state); if (!hw->ncsi_en && (!(adapter->flags & 
RNP_FLAG_SRIOV_ENABLED))) hw->ops.set_mac_rx(hw, false); + if (hw->ncsi_en) { + /* this is a fake down (ncsi); we should set mac loopback */ + hw->ops.set_mac_rx(hw, false); + } + hw->ops.set_mbx_link_event(hw, 0); hw->ops.set_mbx_ifup(hw, 0); @@ -3577,7 +3651,8 @@ void rnp_down(struct rnp_adapter *adapter) for (i = 0; i < adapter->num_rx_queues && is_pci_online; i++) { rnp_disable_rx_queue(adapter, adapter->rx_ring[i]); /* only handle when srio enable and change rx length setup */ - if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED || + hw->ncsi_en) && (adapter->rx_ring[i]->ring_flags & RNP_RING_FLAG_CHANGE_RX_LEN)) { int head; @@ -3614,6 +3689,21 @@ void rnp_down(struct rnp_adapter *adapter) rnp_ping_all_vfs(adapter); } + if (is_pci_online) { + struct device *dev = &adapter->pdev->dev; + u32 status = 0; + int timeout = 0; + + do { + status = rd32(hw, RNP_DMA_AXI_READY); + usleep_range(100, 200); + timeout++; + } while ((status != 0xffff) && (timeout < 100)); + + if (timeout >= 100) + dev_info(dev, "wait axi ready timeout\n"); + } + /* disable transmits in the hardware now that interrupts are off */ for (i = 0; i < adapter->num_tx_queues && is_pci_online; i++) { struct rnp_ring *tx_ring = adapter->tx_ring[i]; @@ -3625,7 +3715,8 @@ void rnp_down(struct rnp_adapter *adapter) /* 2. try to set tx head to 0 in sriov mode * since we don't reset */ - if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED) && + if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED || + hw->ncsi_en) && (!(tx_ring->ring_flags & RNP_RING_SIZE_CHANGE_FIX))) { /* only do this if hw not support tx head to zero auto */ /* n10 should wait tx_ready */ @@ -3690,6 +3781,7 @@ void rnp_down(struct rnp_adapter *adapter) rnp_clean_all_tx_rings(adapter); rnp_clean_all_rx_rings(adapter); + if (hw->ncsi_en) hw->ops.set_mac_rx(hw, true); } @@ -3703,8 +3795,8 @@ static void rnp_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct rnp_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ - int i; bool real_tx_hang = false; + int i; #define TX_TIMEO_LIMIT 16000 for (i = 0; i < adapter->num_tx_queues; i++) { @@ -3739,10 +3831,10 @@ static void rnp_tx_timeout(struct net_device *netdev, unsigned int txqueue) **/ static int rnp_sw_init(struct rnp_adapter *adapter) { - struct rnp_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - unsigned int rss = 0, fdir; int rss_limit = num_online_cpus(); + struct rnp_hw *hw = &adapter->hw; + unsigned int rss = 0, fdir; #ifdef RNP_MAX_RINGS rss_limit = RNP_MAX_RINGS; #endif /* RNP_MAX_RINGS */ @@ -3787,8 +3879,10 @@ static int rnp_sw_init(struct rnp_adapter *adapter) /* set default work limits */ adapter->tx_work_limit = RNP_DEFAULT_TX_WORK; adapter->rx_usecs = RNP_PKT_TIMEOUT; + adapter->rx_usecs_usr_set = RNP_PKT_TIMEOUT; adapter->rx_frames = RNP_RX_PKT_POLL_BUDGET; adapter->tx_usecs = RNP_PKT_TIMEOUT_TX; + adapter->tx_usecs_usr_set = RNP_PKT_TIMEOUT_TX; adapter->tx_frames = RNP_TX_PKT_POLL_BUDGET; /* set default ring sizes */ adapter->tx_ring_item_count = RNP_DEFAULT_TXD; @@ -4090,9 +4184,9 @@ static void rnp_free_all_rx_resources(struct rnp_adapter *adapter) **/ static int rnp_change_mtu(struct net_device *netdev, int new_mtu) { + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2; struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN * 2; /* MTU < 68 is an error and causes problems on some kernels */ if (new_mtu < hw->min_length || 
max_frame > hw->max_length) @@ -4125,12 +4219,14 @@ static int rnp_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) { struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_ring *tx_ring = adapter->tx_ring[queue_index]; + struct rnp_ring *tx_ring; u64 real_rate = 0; adapter->max_rate[queue_index] = maxrate; rnp_dbg("%s: queue:%d maxrate:%d\n", __func__, queue_index, maxrate); + + tx_ring = adapter->tx_ring[queue_index]; if (!maxrate) return rnp_setup_tx_maxrate(tx_ring, 0, adapter->hw.usecstocount * 1000000); @@ -4254,8 +4350,8 @@ static int __maybe_unused rnp_resume(struct device *dev_d) struct pci_dev *pdev = to_pci_dev(dev_d); struct rnp_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; - u32 err; struct rnp_hw *hw = &adapter->hw; + u32 err; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); @@ -4297,8 +4393,7 @@ static int __maybe_unused rnp_resume(struct device *dev_d) if (!err) err = register_mbx_irq(adapter); - if (hw->ops.driver_status) - hw->ops.driver_status(hw, false, rnp_driver_suspuse); + hw->ops.driver_status(hw, false, rnp_driver_suspuse); rnp_reset(adapter); @@ -4334,8 +4429,7 @@ static int __rnp_shutdown(struct pci_dev *pdev, bool *enable_wake) } rtnl_unlock(); - if (hw->ops.driver_status) - hw->ops.driver_status(hw, true, rnp_driver_suspuse); + hw->ops.driver_status(hw, true, rnp_driver_suspuse); remove_mbx_irq(adapter); rnp_clear_interrupt_scheme(adapter); @@ -4404,12 +4498,12 @@ static void rnp_shutdown(struct pci_dev *pdev) void rnp_update_stats(struct rnp_adapter *adapter) { struct net_device_stats *net_stats = &adapter->netdev->stats; - struct rnp_hw *hw = &adapter->hw; struct rnp_hw_stats *hw_stats = &adapter->hw_stats; - int i; - struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; u64 hw_csum_rx_error = 0; u64 hw_csum_rx_good = 0; + struct rnp_ring *ring; + int i; net_stats->tx_packets = 0; net_stats->tx_bytes = 0; @@ -4459,15 +4553,15 @@ void rnp_update_stats(struct rnp_adapter *adapter) */ static void rnp_check_hang_subtask(struct rnp_adapter *adapter) { - int i; + union rnp_rx_desc *rx_desc; struct rnp_ring *tx_ring; - u64 tx_next_to_clean_old; - u64 tx_next_to_clean; - u64 tx_next_to_use; struct rnp_ring *rx_ring; + u64 tx_next_to_clean_old; u64 rx_next_to_clean_old; + u64 tx_next_to_clean; u64 rx_next_to_clean; - union rnp_rx_desc *rx_desc; + u64 tx_next_to_use; + int i; /* If we're down or resetting, just bail */ if (test_bit(__RNP_DOWN, &adapter->state) || @@ -4558,17 +4652,33 @@ static void rnp_check_hang_subtask(struct rnp_adapter *adapter) clear_bit(__RNP_SERVICE_CHECK, &adapter->state); } +static void update_ring_delay(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *ring; + struct rnp_hw *hw = &adapter->hw; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_RX_INT_DELAY_TIMER, + adapter->rx_usecs * hw->usecstocount); + ring = adapter->tx_ring[i]; + ring_wr32(ring, RNP_DMA_REG_TX_INT_DELAY_TIMER, + adapter->tx_usecs * hw->usecstocount); + } +} + /** * rnp_watchdog_update_link - update the link status * @adapter: pointer to the device adapter structure **/ static void rnp_watchdog_update_link(struct rnp_adapter *adapter) { - struct rnp_hw *hw = &adapter->hw; u32 link_speed = adapter->link_speed; - bool link_up = adapter->link_up; - bool duplex = adapter->duplex_old; bool flow_rx = true, flow_tx = true; + bool duplex = adapter->duplex_old; + struct rnp_hw *hw = &adapter->hw; + bool link_up = 
adapter->link_up; const char *speed_str; if (!(adapter->flags & RNP_FLAG_NEED_LINK_UPDATE)) @@ -4654,6 +4764,15 @@ static void rnp_watchdog_update_link(struct rnp_adapter *adapter) ((flow_rx && flow_tx) ? "RX/TX" : (flow_rx ? "RX" : (flow_tx ? "TX" : "None")))); + /* we should update rx irq delay and tx irq delay */ + if (link_speed == RNP_LINK_SPEED_10GB_FULL) { + adapter->rx_usecs = adapter->rx_usecs_usr_set; + adapter->tx_usecs = adapter->tx_usecs_usr_set; + } else { + adapter->rx_usecs = adapter->rx_usecs_usr_set * 6; + adapter->tx_usecs = adapter->tx_usecs_usr_set * 2; + } + update_ring_delay(adapter); } else { if (hw->ops.set_mac_speed) hw->ops.set_mac_speed(hw, false, 0, false); @@ -4847,8 +4966,8 @@ static void rnp_reset_subtask(struct rnp_adapter *adapter) static void rnp_rx_len_reset_subtask(struct rnp_adapter *adapter) { - int i; struct rnp_ring *rx_ring; + int i; for (i = 0; i < adapter->num_tx_queues; i++) { rx_ring = adapter->rx_ring[i]; @@ -4856,13 +4975,54 @@ static void rnp_rx_len_reset_subtask(struct rnp_adapter *adapter) RNP_RING_FLAG_DO_RESET_RX_LEN)) { dbg("[%s] Rx-ring %d count reset\n", adapter->netdev->name, rx_ring->rnp_queue_idx); - rnp_rx_ring_reinit(adapter, rx_ring); - rx_ring->ring_flags &= - (~RNP_RING_FLAG_DO_RESET_RX_LEN); + if (!rnp_rx_ring_reinit(adapter, rx_ring)) { + rx_ring->ring_flags &= + (~RNP_RING_FLAG_DO_RESET_RX_LEN); + } } } } +static void rnp_auto_itr_moderation(struct rnp_adapter *adapter) +{ + int i; + struct rnp_ring *rx_ring; + u64 period = (u64)(jiffies - adapter->last_moder_jiffies); + + if (!adapter->adaptive_rx_coal || + period < adapter->sample_interval * HZ) { + return; + } + + adapter->last_moder_jiffies = jiffies; + + /* it is time to check moderation */ + for (i = 0; i < adapter->num_rx_queues; i++) { + u64 x, rate; + u64 rx_packets, packets, rx_pkt_diff; + + rx_ring = adapter->rx_ring[i]; + rx_packets = READ_ONCE(rx_ring->stats.packets); + rx_pkt_diff = rx_packets - + adapter->last_moder_packets[rx_ring->queue_index]; + packets = rx_pkt_diff; + + x = packets * HZ; + do_div(x, period); + rate = x; + + if (rate != 0) { + if (rate < 20000) + rx_ring->ring_flags |= RNP_RING_LOWER_ITR; + else + rx_ring->ring_flags &= (~RNP_RING_LOWER_ITR); + } + + /* write back new count */ + adapter->last_moder_packets[rx_ring->queue_index] = rx_packets; + } +} + /** * rnp_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data @@ -4876,6 +5036,7 @@ static void rnp_service_task(struct work_struct *work) rnp_reset_pf_subtask(adapter); rnp_watchdog_subtask(adapter); rnp_rx_len_reset_subtask(adapter); + rnp_auto_itr_moderation(adapter); rnp_check_hang_subtask(adapter); rnp_service_event_complete(adapter); } @@ -4883,9 +5044,9 @@ static void rnp_service_task(struct work_struct *work) static int rnp_tso(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, u32 *mac_ip_len, u8 *hdr_len, u32 *tx_flags) { - struct sk_buff *skb = first->skb; struct net_device *netdev = tx_ring->netdev; struct rnp_adapter *adapter = netdev_priv(netdev); + struct sk_buff *skb = first->skb; union { struct iphdr *v4; struct ipv6hdr *v6; @@ -4896,11 +5057,11 @@ static int rnp_tso(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, struct udphdr *udp; unsigned char *hdr; } l4; - u32 paylen, l4_offset; - int err; - u8 *inner_mac; u16 gso_segs, gso_size; + u32 paylen, l4_offset; u16 gso_need_pad; + u8 *inner_mac; + int err; if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; @@ -5033,12 +5194,12 @@ static int rnp_tx_csum(struct rnp_ring *tx_ring, 
u32 *tx_flags) { struct sk_buff *skb = first->skb; + u8 *inner_mac = skb->data; u8 l4_proto = 0; - u8 ip_len = 0; + __be16 frag_off; u8 mac_len = 0; - u8 *inner_mac = skb->data; + u8 ip_len = 0; u8 *exthdr; - __be16 frag_off; union { struct iphdr *v4; struct ipv6hdr *v6; @@ -5184,14 +5345,14 @@ static int rnp_tx_map(struct rnp_ring *tx_ring, struct rnp_tx_buffer *first, u32 mac_ip_len, u32 tx_flags) { + u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56)); struct sk_buff *skb = first->skb; struct rnp_tx_buffer *tx_buffer; + u16 i = tx_ring->next_to_use; struct rnp_tx_desc *tx_desc; + unsigned int data_len, size; skb_frag_t *frag; dma_addr_t dma; - unsigned int data_len, size; - u16 i = tx_ring->next_to_use; - u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56)); tx_desc = RNP_TX_DESC(tx_ring, i); size = skb_headlen(skb); @@ -5328,9 +5489,9 @@ static int rnp_tx_map(struct rnp_ring *tx_ring, static void rnp_force_src_mac(struct sk_buff *skb, struct net_device *netdev) { + struct netdev_hw_addr *ha; u8 *data = skb->data; bool ret = false; - struct netdev_hw_addr *ha; /* force all multicast / broadcast src mac to myself */ if (is_multicast_ether_addr(data)) { if (memcmp(data + netdev->addr_len, netdev->dev_addr, @@ -5359,16 +5520,16 @@ netdev_tx_t rnp_xmit_frame_ring(struct sk_buff *skb, struct rnp_adapter *adapter, struct rnp_ring *tx_ring, bool tx_padding) { - struct rnp_tx_buffer *first; - int tso; - u32 tx_flags = 0; - unsigned short f; u16 count = TXD_USE_COUNT(skb_headlen(skb)); __be16 protocol = skb->protocol; - u8 hdr_len = 0; + struct rnp_tx_buffer *first; int ignore_vlan = 0; /* default len should not 0 (hw request) */ u32 mac_ip_len = 20; + u32 tx_flags = 0; + unsigned short f; + u8 hdr_len = 0; + int tso; tx_dbg("=== begin ====\n"); tx_dbg("rnp skb:%p, skb->len:%d headlen:%d, data_len:%d\n", skb, @@ -5646,15 +5807,15 @@ static int rnp_set_mac(struct net_device *netdev, void *p) struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; struct sockaddr *addr = p; - bool sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); + bool sriov_flag; - dbg("[%s] call set mac\n", netdev->name); if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, addr->sa_data); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + sriov_flag = !!(adapter->flags & RNP_FLAG_SRIOV_ENABLED); hw->ops.set_mac(hw, hw->mac.addr, sriov_flag); /* reset veb table */ @@ -5665,9 +5826,9 @@ static int rnp_set_mac(struct net_device *netdev, void *p) static int rnp_mdio_read(struct net_device *netdev, int prtad, int devad, u32 addr, u32 *phy_value) { - int rc = -EIO; struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int rc = -EIO; u16 value; rc = hw->ops.phy_read_reg(hw, addr, 0, &value); @@ -5827,6 +5988,12 @@ int rnp_setup_tc(struct net_device *dev, u8 tc) /* we cannot support tc with sriov mode */ if ((tc) && (adapter->flags & RNP_FLAG_SRIOV_ENABLED)) return -EINVAL; + /* if now we are in force mode, never need force, if not force it */ + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + hw->ops.set_mac_rx(hw, false); + hw->ops.driver_status(hw, true, + rnp_driver_force_control_mac); + } /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. 
Unfortunately, the @@ -5865,6 +6032,13 @@ int rnp_setup_tc(struct net_device *dev, u8 tc) if (netif_running(dev)) ret = rnp_open(dev); + /* if we not set force now */ + if (!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + hw->ops.set_mac_rx(hw, false); + hw->ops.driver_status(hw, false, + rnp_driver_force_control_mac); + } + clear_bit(__RNP_RESETTING, &adapter->state); return ret; } @@ -5887,8 +6061,8 @@ static int rnp_delete_knode(struct net_device *dev, { /* 1. check weather filter rule is ingress root */ struct rnp_adapter *adapter = netdev_priv(dev); - u32 loc = cls->knode.handle & 0xfffff; u32 uhtid = TC_U32_USERHTID(cls->knode.handle); + u32 loc = cls->knode.handle & 0xfffff; int ret; if (uhtid != 0x800) @@ -5943,8 +6117,9 @@ static int rnp_clsu32_build_input(struct tc_cls_u32_offload *cls, const struct rnp_match_parser *parsers) { int i = 0, j = 0, err = -1; - __be32 val, mask, off; + __be32 val, mask; bool found; + int off; for (i = 0; i < cls->knode.sel->nkeys; i++) { off = cls->knode.sel->keys[i].off; @@ -5984,12 +6159,12 @@ static int rnp_config_knode(struct net_device *dev, __be16 protocol, * find a exist extry and the match val and mask is added before * so we don't need add it again */ - u32 uhtid, link_uhtid; - int ret; struct rnp_adapter *adapter = netdev_priv(dev); - u8 queue; - struct rnp_fdir_filter *input; u32 loc = cls->knode.handle & 0xfffff; + struct rnp_fdir_filter *input; + u32 uhtid, link_uhtid; + u8 queue; + int ret; if (protocol != htons(ETH_P_IP)) return -EOPNOTSUPP; @@ -6135,10 +6310,10 @@ static netdev_features_t rnp_fix_features(struct net_device *netdev, static int rnp_set_features(struct net_device *netdev, netdev_features_t features) { - struct rnp_adapter *adapter = netdev_priv(netdev); netdev_features_t changed = netdev->features ^ features; - bool need_reset = false; + struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + bool need_reset = false; netdev->features = features; if (changed & NETIF_F_NTUPLE) { @@ -6327,7 +6502,7 @@ static netdev_features_t rnp_features_check(struct sk_buff *skb, return features; } -const struct net_device_ops rnp10_netdev_ops = { +static const struct net_device_ops rnp10_netdev_ops = { .ndo_open = rnp_open, .ndo_stop = rnp_close, .ndo_start_xmit = rnp_xmit_frame, @@ -6381,8 +6556,8 @@ static void rnp_assign_netdev_ops(struct net_device *dev) int rnp_wol_supported(struct rnp_adapter *adapter, u16 device_id, u16 subdevice_id) { - int is_wol_supported = 0; struct rnp_hw *hw = &adapter->hw; + int is_wol_supported = 0; if (hw->wol_supported) is_wol_supported = 1; @@ -6407,8 +6582,8 @@ static void remove_mbx_irq(struct rnp_adapter *adapter) static int register_mbx_irq(struct rnp_adapter *adapter) { - struct rnp_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + struct rnp_hw *hw = &adapter->hw; int err = 0; /* for mbx:vector0 */ @@ -6436,8 +6611,8 @@ static int register_mbx_irq(struct rnp_adapter *adapter) static int rnp_rm_adpater(struct rnp_adapter *adapter) { - struct net_device *netdev; struct rnp_hw *hw = &adapter->hw; + struct net_device *netdev; netdev = adapter->netdev; pr_info("= remove adapter:%s =\n", netdev->name); @@ -6473,8 +6648,7 @@ static int rnp_rm_adpater(struct rnp_adapter *adapter) adapter->netdev = NULL; - if (hw->ops.driver_status) - hw->ops.driver_status(hw, false, rnp_driver_insmod); + hw->ops.driver_status(hw, false, rnp_driver_insmod); remove_mbx_irq(adapter); @@ -6498,9 +6672,11 @@ static int rnp_rm_adpater(struct rnp_adapter *adapter) 
static void rnp_fix_dma_tx_status(struct rnp_adapter *adapter) { - int i; struct rnp_hw *hw = &adapter->hw; - struct rnp_dma_info *dma = &hw->dma; + struct rnp_dma_info *dma; + int i; + + dma = &hw->dma; if (hw->hw_type == rnp_hw_n10 || hw->hw_type == rnp_hw_n400) { for (i = 0; i < dma->max_tx_queues; i++) @@ -6536,17 +6712,16 @@ static int rnp_can_rpu_start(struct rnp_adapter *adapter) static int rnp_add_adpater(struct pci_dev *pdev, struct rnp_info *ii, struct rnp_adapter **padapter) { - int i, err = 0; + u32 queues = ii->total_queue_pair_cnts; struct rnp_adapter *adapter = NULL; - struct net_device *netdev; - struct rnp_hw *hw; - u8 __iomem *hw_addr = NULL; u8 __iomem *hw_addr_bar0 = NULL; - + u8 __iomem *hw_addr = NULL; + struct net_device *netdev; + static int bd_number; u32 dma_version = 0; u32 nic_version = 0; - u32 queues = ii->total_queue_pair_cnts; - static int bd_number; + struct rnp_hw *hw; + int i, err = 0; pr_info("==== add adapter queues:%d ====", queues); netdev = alloc_etherdev_mq(sizeof(struct rnp_adapter), queues); @@ -6708,6 +6883,10 @@ static int rnp_add_adpater(struct pci_dev *pdev, struct rnp_info *ii, hw->mac.num_rar_entries -= hw->ncsi_rar_entries; hw->num_rar_entries -= hw->ncsi_rar_entries; } + if (hw->force_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); hw->default_rx_queue = 0; pr_info("%s %s: dma version:0x%x, nic version:0x%x, pfvfnum:0x%x\n", adapter->name, pci_name(pdev), hw->dma_version, @@ -6740,8 +6919,13 @@ static int rnp_add_adpater(struct pci_dev *pdev, struct rnp_info *ii, goto err_sw_init; } - if (hw->ops.driver_status) - hw->ops.driver_status(hw, true, rnp_driver_insmod); + hw->ops.driver_status(hw, true, rnp_driver_insmod); + + { + bool flag_t = !!(adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + + hw->ops.driver_status(hw, flag_t, rnp_driver_force_control_mac); + } if (adapter->num_other_vectors) { #ifdef CONFIG_PCI_IOV @@ -6950,8 +7134,8 @@ static int rnp_add_adpater(struct pci_dev *pdev, struct rnp_info *ii, **/ static int rnp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct rnp_adapter *adapter; struct rnp_info *ii = rnp_info_tbl[id->driver_data]; + struct rnp_adapter *adapter; int err; /* Catch broken hardware that put the wrong VF device ID in @@ -7049,177 +7233,6 @@ static void rnp_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -/** - * rnp_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. 
- */ -static pci_ers_result_t rnp_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct rnp_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - struct rnp_hw *hw = &adapter->hw; - -#ifdef CONFIG_PCI_IOV - struct pci_dev *bdev, *vfdev; - u32 dw0, dw1, dw2, dw3; - int vf, pos; - u16 req_id, pf_func; - - if (adapter->num_vfs == 0) - goto skip_bad_vf_detection; - - bdev = pdev->bus->self; - while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT)) - bdev = bdev->bus->self; - - if (!bdev) - goto skip_bad_vf_detection; - - pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR); - if (!pos) - goto skip_bad_vf_detection; - - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0); - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1); - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2); - pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3); - - req_id = dw1 >> 16; - /* On the n10 if bit 7 of the requestor ID is set then it's a VF ? */ - if (!(req_id & 0x0080)) - goto skip_bad_vf_detection; - - pf_func = req_id & 0x01; - if ((pf_func & 1) == (pdev->devfn & 1)) { - unsigned int device_id; - - vf = (req_id & 0x7F) >> 1; - e_dev_err("VF %d has caused a PCIe error\n", vf); - e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2:", - dw0, dw1); - e_dev_err("%8.8x\tdw3: %8.8x\n", dw2, dw3); - switch (hw->hw_type) { - case rnp_hw_n10: - device_id = PCI_DEVICE_ID_N10_VF; - break; - case rnp_hw_n400: - device_id = PCI_DEVICE_ID_N400_VF; - break; - default: - device_id = PCI_DEVICE_ID_N10_VF; - } - /* Find the pci device of the offending VF */ - vfdev = pci_get_device(PCI_VENDOR_ID_MUCSE, device_id, NULL); - while (vfdev) { - if (vfdev->devfn == (req_id & 0xFF)) - break; - vfdev = pci_get_device(PCI_VENDOR_ID_MUCSE, device_id, - vfdev); - } - /* There's a slim chance the VF could have been hot plugged, - * so if it is no longer present we don't need to issue the - * VFLR. Just clean up the AER in that case. - */ - if (vfdev) { - e_dev_err("Issuing VFLR to VF %d\n", vf); - pci_write_config_dword(vfdev, 0xA8, 0x00008000); - /* Free device reference count */ - pci_dev_put(vfdev); - } - - pci_aer_clear_nonfatal_status(pdev); - } - - /* Even though the error may have occurred on the other port - * we still need to increment the vf error reference count for - * both ports because the I/O resume function will be called - * for both of them. - */ - adapter->vferr_refcount++; - - return PCI_ERS_RESULT_RECOVERED; - -skip_bad_vf_detection: -#endif /* CONFIG_PCI_IOV */ - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - if (netif_running(netdev)) - rnp_down(adapter); - pci_disable_device(pdev); - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * rnp_io_slot_reset - called after the pci bus has been reset. - * @pdev: Pointer to PCI device - * - * Restart the card from scratch, as if from a cold-boot. 
- */ -static pci_ers_result_t rnp_io_slot_reset(struct pci_dev *pdev) -{ - pci_ers_result_t result = PCI_ERS_RESULT_NONE; - - struct rnp_adapter *adapter = pci_get_drvdata(pdev); - - if (pci_enable_device_mem(pdev)) { - e_err(probe, "Cannot re-enable PCI device after reset.\n"); - result = PCI_ERS_RESULT_DISCONNECT; - } else { - /* we need this */ - smp_mb__before_atomic(); - pci_set_master(pdev); - pci_restore_state(pdev); - pci_save_state(pdev); - pci_wake_from_d3(pdev, false); - rnp_reset(adapter); - result = PCI_ERS_RESULT_RECOVERED; - } - pci_aer_clear_nonfatal_status(pdev); - - return result; -} - -/** - * rnp_io_resume - called when traffic can start flowing again. - * @pdev: Pointer to PCI device - * - * This callback is called when the error recovery driver tells us that - * its OK to resume normal operation. - */ -static void rnp_io_resume(struct pci_dev *pdev) -{ - struct rnp_adapter *adapter = pci_get_drvdata(pdev); - struct net_device *netdev = adapter->netdev; - -#ifdef CONFIG_PCI_IOV - if (adapter->vferr_refcount) { - e_info(drv, "Resuming after VF err\n"); - adapter->vferr_refcount--; - return; - } - -#endif - if (netif_running(netdev)) - rnp_up(adapter); - - netif_device_attach(netdev); -} - -static const struct pci_error_handlers rnp_err_handler = { - .error_detected = rnp_io_error_detected, - .slot_reset = rnp_io_slot_reset, - .resume = rnp_io_resume, -}; - static SIMPLE_DEV_PM_OPS(rnp_pm_ops, rnp_suspend, rnp_resume); static struct pci_driver rnp_driver = { .name = rnp_driver_name, @@ -7229,7 +7242,6 @@ static struct pci_driver rnp_driver = { .driver.pm = &rnp_pm_ops, .shutdown = rnp_shutdown, .sriov_configure = rnp_pci_sriov_configure, - .err_handler = &rnp_err_handler, }; static int __init rnp_init_module(void) diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c index c3974469373d..1d59297b6c19 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_mbx.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.c @@ -74,9 +74,9 @@ static inline u16 rnp_mbx_get_ack(struct rnp_hw *hw, int reg) static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, enum MBX_ID mbx_id) { + struct rnp_mbx_info *mbx = &hw->mbx; u16 req; int reg; - struct rnp_mbx_info *mbx = &hw->mbx; u32 v; reg = (mbx_id == MBX_CM3CPU) ? PF2CPU_COUNTER(mbx) : @@ -98,11 +98,11 @@ static inline void rnp_mbx_inc_pf_req(struct rnp_hw *hw, static inline void rnp_mbx_inc_pf_ack(struct rnp_hw *hw, enum MBX_ID mbx_id) { - u16 ack; struct rnp_mbx_info *mbx = &hw->mbx; int reg = (mbx_id == MBX_CM3CPU) ? 
PF2CPU_COUNTER(mbx) : - PF2VF_COUNTER(mbx, mbx_id); + PF2VF_COUNTER(mbx, mbx_id); u32 v = mbx_rd32(hw, reg); + u16 ack; ack = (v >> 16) & 0xffff; ack++; @@ -275,9 +275,9 @@ static s32 rnp_write_posted_mbx(struct rnp_hw *hw, u32 *msg, u16 size, **/ static s32 rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) { + struct rnp_mbx_info *mbx = &hw->mbx; s32 ret_val = RNP_ERR_MBX; u16 hw_req_count = 0; - struct rnp_mbx_info *mbx = &hw->mbx; if (pci_channel_offline(hw->pdev)) return -EIO; @@ -316,8 +316,8 @@ static s32 rnp_check_for_msg_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) **/ static s32 rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) { - s32 ret_val = RNP_ERR_MBX; struct rnp_mbx_info *mbx = &hw->mbx; + s32 ret_val = RNP_ERR_MBX; if (pci_channel_offline(hw->pdev)) return -EIO; @@ -348,11 +348,11 @@ static s32 rnp_check_for_ack_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) **/ static s32 rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) { - int try_cnt = 5000; // wait 500ms struct rnp_mbx_info *mbx = &hw->mbx; u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : PF2VF_MBOX_CTRL(mbx, mbx_id); + int try_cnt = 5000; while (try_cnt-- > 0) { /* Take ownership of the buffer */ @@ -381,8 +381,6 @@ static s32 rnp_obtain_mbx_lock_pf(struct rnp_hw *hw, enum MBX_ID mbx_id) static s32 rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) { - s32 ret_val = 0; - u16 i; struct rnp_mbx_info *mbx = &hw->mbx; u32 DATA_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) : @@ -390,6 +388,8 @@ static s32 rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? PF2CPU_MBOX_CTRL(mbx) : PF2VF_MBOX_CTRL(mbx, mbx_id); + s32 ret_val = 0; + u16 i; if (pci_channel_offline(hw->pdev)) return -EIO; @@ -449,14 +449,14 @@ static s32 rnp_write_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, static s32 rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, enum MBX_ID mbx_id) { - s32 ret_val = -EIO; - u32 i; struct rnp_mbx_info *mbx = &hw->mbx; u32 BUF_REG = (mbx_id == MBX_CM3CPU) ? CPU_PF_SHM_DATA(mbx) : PF_VF_SHM_DATA(mbx, mbx_id); u32 CTRL_REG = (mbx_id == MBX_CM3CPU) ? 
PF2CPU_MBOX_CTRL(mbx) : PF2VF_MBOX_CTRL(mbx, mbx_id); + s32 ret_val = -EIO; + u32 i; if (pci_channel_offline(hw->pdev)) return -EIO; @@ -499,8 +499,8 @@ static s32 rnp_read_mbx_pf(struct rnp_hw *hw, u32 *msg, u16 size, static void rnp_mbx_reset(struct rnp_hw *hw) { - int idx, v; struct rnp_mbx_info *mbx = &hw->mbx; + int idx, v; for (idx = 0; idx < hw->max_vfs; idx++) { v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); @@ -526,9 +526,9 @@ static void rnp_mbx_reset(struct rnp_hw *hw) static int rnp_mbx_configure_pf(struct rnp_hw *hw, int nr_vec, bool enable) { + struct rnp_mbx_info *mbx = &hw->mbx; int idx = 0; u32 v; - struct rnp_mbx_info *mbx = &hw->mbx; if (pci_channel_offline(hw->pdev)) return -EIO; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h index 411613db42e9..8eca881fbd1c 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_mbx.h +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx.h @@ -177,6 +177,7 @@ unsigned int rnp_mbx_change_timeout(struct rnp_hw *hw, int timeout_ms); extern struct rnp_mbx_operations mbx_ops_generic; int rnp_mbx_lldp_status_get(struct rnp_hw *hw); int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable); - +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, + int enable, dma_addr_t dma_phy, int bytes); #endif /* _RNP_MBX_H_ */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c index 131a88715b2a..d61b37142ff5 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.c @@ -14,8 +14,8 @@ static bool is_cookie_valid(struct rnp_hw *hw, void *cookie) { - unsigned char *begin = (unsigned char *)(&hw->mbx.cookie_pool.cookies[0]); unsigned char *end = (unsigned char *)(&hw->mbx.cookie_pool.cookies[MAX_COOKIES_ITEMS]); + unsigned char *begin = (unsigned char *)(&hw->mbx.cookie_pool.cookies[0]); if (((unsigned char *)cookie) >= begin && ((unsigned char *)cookie) < end) return true; @@ -63,26 +63,27 @@ static struct mbx_req_cookie *mbx_cookie_zalloc(struct rnp_hw *hw, int priv_len) } /** - * @force_free: - * true: no other reference to this cookie, it is save to mark cookie reusable - * false: cookie may used by other(firmware), only available after 2min + * mbx_free_cookie - mark a mailbox request cookie as free + * @cookie: cookie to be freed + * @force_free: free the cookie for reuse immediately + * If there is no other reference to this cookie, it is safe to mark it reusable at once; + * otherwise the firmware may still use it, so it only becomes available again after 2min **/ static void mbx_free_cookie(struct mbx_req_cookie *cookie, bool force_free) { if (!cookie) return; - if (force_free) { + if (force_free) cookie->stat = COOKIE_FREE; - } else { + else cookie->stat = COOKIE_FREE_WAIT_TIMEOUT; - } } static int rnp_mbx_write_posted_locked(struct rnp_hw *hw, struct mbx_fw_cmd_req *req) { - int err = 0; int retry = 3; + int err = 0; if (pci_channel_offline(hw->pdev)) return -EIO; @@ -133,8 +134,8 @@ static void rnp_link_stat_mark_disable(struct rnp_hw *hw) static int rnp_mbx_fw_post_req(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, struct mbx_req_cookie *cookie) { - int err = 0; struct rnp_adapter *adpt = hw->back; + int err = 0; if (pci_channel_offline(hw->pdev)) return -EIO; @@ -195,8 +196,8 @@ static int rnp_mbx_fw_post_req(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, static int rnp_fw_send_cmd_wait(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, struct mbx_fw_cmd_reply *reply) { - int err; int retry_cnt = 3; + int err; if (!hw || !req || !reply || !hw->mbx.ops.read_posted) { rnp_err("error: hw:%p req:%p reply:%p\n", hw, req, reply); 
@@ -252,8 +253,8 @@ static int rnp_fw_send_cmd_wait(struct rnp_hw *hw, struct mbx_fw_cmd_req *req, int wait_mbx_init_done(struct rnp_hw *hw) { - int count = 10000; u32 v = rd32(hw, RNP_TOP_NIC_DUMMY); + int count = 10000; while (count) { v = rd32(hw, RNP_TOP_NIC_DUMMY); @@ -274,12 +275,12 @@ int wait_mbx_init_done(struct rnp_hw *hw) **/ int rnp_mbx_get_lane_stat(struct rnp_hw *hw) { - int err = 0; - struct mbx_fw_cmd_req req; - struct rnp_adapter *adpt = hw->back; - struct lane_stat_data *st; struct mbx_req_cookie *cookie = NULL; + struct rnp_adapter *adpt = hw->back; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + struct lane_stat_data *st; + int err = 0; memset(&req, 0, sizeof(req)); @@ -308,7 +309,7 @@ int rnp_mbx_get_lane_stat(struct rnp_hw *hw) hw->phy_type = st->phy_type; adpt->speed = st->speed; hw->speed = adpt->speed; - if (st->is_sgmii) { + if (st->is_sgmii || hw->phy_type == PHY_TYPE_10G_TP) { adpt->phy_addr = st->phy_addr; } else { adpt->sfp.fault = st->sfp.fault; @@ -372,8 +373,8 @@ int rnp_mbx_get_lane_stat(struct rnp_hw *hw) **/ int rnp_mbx_get_link_stat(struct rnp_hw *hw) { - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -389,8 +390,8 @@ int rnp_mbx_get_link_stat(struct rnp_hw *hw) **/ int rnp_mbx_fw_reset_phy(struct rnp_hw *hw) { - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; int ret; memset(&req, 0, sizeof(req)); @@ -417,11 +418,11 @@ int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int req_data_bytes, int reply_bytes, dma_addr_t dma_phy_addr) { - int err; struct mbx_req_cookie *cookie = NULL; - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; u64 address = dma_phy_addr; + struct mbx_fw_cmd_req req; + int err; cookie = mbx_cookie_zalloc(hw, 0); if (!cookie) @@ -462,9 +463,9 @@ int rnp_maintain_req(struct rnp_hw *hw, int cmd, int arg0, int rnp_fw_get_macaddr(struct rnp_hw *hw, int pfvfnum, u8 *mac_addr, int nr_lane) { - int err; - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -517,8 +518,8 @@ static int rnp_mbx_sfp_read(struct rnp_hw *hw, int sfp_i2c_addr, int reg, int cnt, u8 *out_buf) { struct mbx_fw_cmd_req req; - int err = -EIO; int nr_lane = hw->nr_lane; + int err = -EIO; if (cnt > MBX_SFP_READ_MAX_CNT || !out_buf) { rnp_err("%s: cnt:%d should <= %d out_buf:%p\n", __func__, @@ -599,8 +600,8 @@ int rnp_mbx_sfp_module_eeprom_info(struct rnp_hw *hw, int sfp_addr, int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v) { struct mbx_fw_cmd_req req; - int err; int nr_lane = hw->nr_lane; + int err; memset(&req, 0, sizeof(req)); build_mbx_sfp_write(&req, nr_lane, sfp_addr, reg, v); @@ -617,8 +618,8 @@ int rnp_mbx_sfp_write(struct rnp_hw *hw, int sfp_addr, int reg, short v) **/ int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg) { - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; int err, ret = 0xffffffff; memset(&req, 0, sizeof(req)); @@ -661,8 +662,8 @@ int rnp_mbx_fw_reg_read(struct rnp_hw *hw, int fw_reg) int rnp_mbx_reg_write(struct rnp_hw *hw, int fw_reg, int value) { struct mbx_fw_cmd_req req; - int err; int temp[4]; + int err; memset(&req, 0, sizeof(req)); temp[0] = value; @@ -706,8 +707,8 @@ int rnp_mbx_reg_writev(struct rnp_hw *hw, int fw_reg, int value[4], int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode) { 
struct mbx_fw_cmd_req req; - int err; int nr_lane = hw->nr_lane; + int err; memset(&req, 0, sizeof(req)); build_mbx_wol_set(&req, nr_lane, mode); @@ -724,8 +725,8 @@ int rnp_mbx_wol_set(struct rnp_hw *hw, u32 mode) **/ int rnp_mbx_set_dump(struct rnp_hw *hw, int flag) { - int err; struct mbx_fw_cmd_req req; + int err; memset(&req, 0, sizeof(req)); build_set_dump(&req, hw->nr_lane, flag); @@ -746,17 +747,20 @@ int rnp_mbx_force_speed(struct rnp_hw *hw, int speed) { int cmd = 0x01150000; - if (hw->force_10g_1g_speed_ablity == 0) + if (hw->force_10g_1g_speed_ability == 0) return -EINVAL; if (speed == RNP_LINK_SPEED_10GB_FULL) { cmd = 0x01150002; hw->force_speed_stat = FORCE_SPEED_STAT_10G; + hw->saved_force_link_speed = speed; } else if (speed == RNP_LINK_SPEED_1GB_FULL) { cmd = 0x01150001; hw->force_speed_stat = FORCE_SPEED_STAT_1G; + hw->saved_force_link_speed = speed; } else { cmd = 0x01150000; hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED; + hw->saved_force_link_speed = RNP_LINK_SPEED_UNKNOWN; } return rnp_mbx_set_dump(hw, cmd); @@ -772,14 +776,14 @@ int rnp_mbx_force_speed(struct rnp_hw *hw, int speed) **/ int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int bytes) { - int err; struct mbx_req_cookie *cookie = NULL; + struct get_dump_reply *get_dump; struct mbx_fw_cmd_reply reply; struct mbx_fw_cmd_req req; - struct get_dump_reply *get_dump; - void *dma_buf = NULL; dma_addr_t dma_phy = 0; + void *dma_buf = NULL; u64 address; + int err; cookie = mbx_cookie_zalloc(hw, sizeof(*get_dump)); if (!cookie) @@ -837,13 +841,13 @@ int rnp_mbx_get_dump(struct rnp_hw *hw, int flags, u8 *data_out, int bytes) int rnp_fw_update(struct rnp_hw *hw, int partition, const u8 *fw_bin, int bytes) { - int err; struct mbx_req_cookie *cookie = NULL; - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; void *dma_buf = NULL; dma_addr_t dma_phy; u64 address; + int err; cookie = mbx_cookie_zalloc(hw, 0); if (!cookie) { @@ -916,9 +920,9 @@ int rnp_mbx_link_event_enable(struct rnp_hw *hw, int enable) static int rnp_fw_get_capability(struct rnp_hw *hw, struct phy_abilities *abil) { - int err; - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -974,8 +978,8 @@ static int to_mac_type(struct phy_abilities *ability) int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, int value2, int value3) { - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -993,9 +997,9 @@ int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, **/ int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status) { - int err; - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -1020,9 +1024,9 @@ int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status) **/ int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status) { - int err; - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -1040,6 +1044,38 @@ int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status) return err; } +/** + * rnp_mbx_ifforce_control_mac - set mac force control to firmware + * @hw: hw private structure + * @status: force state + * + **/ +int rnp_mbx_ifforce_control_mac(struct rnp_hw 
*hw, int status) +{ + int err; + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + + build_ifforce(&req, hw->nr_lane, status); + + if (mutex_lock_interruptible(&hw->mbx.lock)) + return -EAGAIN; + + err = hw->mbx.ops.write_posted(hw, (u32 *)&req, + (req.datalen + MBX_REQ_HDR_LEN) / 4, + MBX_FW); + + mutex_unlock(&hw->mbx.lock); + + rnp_logd(LOG_MBX_IFUP_DOWN, "%s: lane:%d status:%d\n", __func__, + hw->nr_lane, status); + + return err; +} + /** * rnp_mbx_ifup_down - set port status to firmware * @hw: hw private structure @@ -1048,9 +1084,9 @@ int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status) **/ int rnp_mbx_ifup_down(struct rnp_hw *hw, int up) { - int err; - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int err; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -1078,8 +1114,8 @@ int rnp_mbx_ifup_down(struct rnp_hw *hw, int up) **/ int rnp_mbx_led_set(struct rnp_hw *hw, int value) { - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; memset(&req, 0, sizeof(req)); memset(&reply, 0, sizeof(reply)); @@ -1096,69 +1132,87 @@ int rnp_mbx_led_set(struct rnp_hw *hw, int value) **/ int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info) { - int err; - struct phy_abilities ablity; + struct phy_abilities ability; int try_cnt = 3; + int err; - memset(&ablity, 0, sizeof(ablity)); + memset(&ability, 0, sizeof(ability)); rnp_link_stat_mark_disable(hw); while (try_cnt--) { - err = rnp_fw_get_capability(hw, &ablity); + err = rnp_fw_get_capability(hw, &ability); if (err == 0 && info) { - hw->lane_mask = ablity.lane_mask & 0xf; - info->mac = to_mac_type(&ablity); + hw->lane_mask = ability.lane_mask & 0xf; + info->mac = to_mac_type(&ability); info->adapter_cnt = hweight_long(hw->lane_mask); - hw->mode = ablity.nic_mode; - hw->pfvfnum = ablity.pfnum; - hw->speed = ablity.speed; - hw->nr_lane = 0; // PF1 - hw->fw_version = ablity.fw_version; + hw->mode = ability.nic_mode; + hw->pfvfnum = ability.pfnum; + hw->speed = ability.speed; + hw->nr_lane = 0; + hw->fw_version = ability.fw_version; hw->mac_type = info->mac; - hw->phy_type = ablity.phy_type; - hw->axi_mhz = ablity.axi_mhz; - hw->port_ids = ablity.port_ids; - hw->bd_uid = ablity.bd_uid; - hw->phy_id = ablity.phy_id; - hw->wol = ablity.wol_status; - hw->eco = ablity.e.v2; + hw->phy_type = ability.phy_type; + hw->axi_mhz = ability.axi_mhz; + hw->port_ids = ability.port_ids; + hw->bd_uid = ability.bd_uid; + hw->phy_id = ability.phy_id; + hw->wol = ability.wol_status; + hw->eco = ability.e.v2; + hw->force_link_supported = + ability.e.force_link_supported; + + if (ability.e.force_link_supported && + (ability.e.force_down_en & 0x1)) { + hw->force_status = 1; + } if (hw->fw_version >= 0x00050201 && - ablity.speed == SPEED_10000) { + ability.speed == SPEED_10000) { hw->force_speed_stat = FORCE_SPEED_STAT_DISABLED; - hw->force_10g_1g_speed_ablity = 1; + hw->force_10g_1g_speed_ability = 1; } - if (ablity.ext_ablity != 0xffffffff && - ablity.e.valid) { - hw->ncsi_en = (ablity.e.ncsi_en == 1); + if (ability.ext_ability != 0xffffffff && + ability.e.valid) { + hw->ncsi_en = (ability.e.ncsi_en == 1); hw->ncsi_rar_entries = 1; - hw->rpu_en = ablity.e.rpu_en; + hw->rpu_en = ability.e.rpu_en; if (hw->rpu_en) - ablity.e.rpu_availble = 1; - hw->rpu_availble = ablity.e.rpu_availble; - hw->fw_lldp_ablity = ablity.e.fw_lldp_ablity; + ability.e.rpu_availble = 1; + hw->rpu_availble = 
ability.e.rpu_availble; + hw->fw_lldp_ability = ability.e.fw_lldp_ability; } else { hw->ncsi_rar_entries = 0; } + if (hw->force_link_supported == 0) + hw->force_status = hw->ncsi_en ? 0 : 1; + pr_info("%s: nic-mode:%d mac:%d adpt_cnt:%d lane_mask:0x%x", __func__, hw->mode, info->mac, info->adapter_cnt, hw->lane_mask); pr_info("phy_type 0x%x, pfvfnum:0x%x, fw-version:0x%08x\n, axi:%d Mhz,", hw->phy_type, hw->pfvfnum, - ablity.fw_version, ablity.axi_mhz); - pr_info("port_id:%d bd_uid:0x%08x 0x%x ex-ablity:0x%x fs:%d speed:%d ", - ablity.port_id[0], hw->bd_uid, - ablity.phy_id, ablity.ext_ablity, - hw->force_10g_1g_speed_ablity, - ablity.speed); + ability.fw_version, ability.axi_mhz); + pr_info("port_id:%d bd_uid:0x%08x 0x%x ex-ability:0x%x fs:%d speed:%d ", + ability.port_id[0], hw->bd_uid, + ability.phy_id, ability.ext_ability, + hw->force_10g_1g_speed_ability, + ability.speed); pr_info("ncsi_en:%u %d wol=0x%x rpu:%d-%d eco %d\n", hw->ncsi_en, hw->ncsi_rar_entries, hw->wol, hw->rpu_en, hw->rpu_availble, hw->eco); + if (hw->phy_type == PHY_TYPE_10G_TP) { + hw->supported_link = RNP_LINK_SPEED_10GB_FULL | + RNP_LINK_SPEED_1GB_FULL | + RNP_LINK_SPEED_1GB_HALF; + hw->phy.autoneg_advertised = hw->supported_link; + hw->autoneg = 1; + } + if (info->adapter_cnt != 0) return 0; } @@ -1176,12 +1230,12 @@ int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info) **/ int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage) { - int err; struct mbx_req_cookie *cookie = NULL; struct mbx_fw_cmd_reply reply; struct mbx_fw_cmd_req req; struct get_temp *temp; int temp_v = 0; + int err; cookie = mbx_cookie_zalloc(hw, sizeof(*temp)); if (!cookie) @@ -1287,6 +1341,11 @@ static inline int rnp_mbx_fw_req_handler(struct rnp_adapter *adapter, else adapter->hw.link = 0; + if (req->link_stat.st[0].lldp_status) + adapter->priv_flags |= RNP_PRIV_FLAG_LLDP_EN_STAT; + else + adapter->priv_flags &= (~RNP_PRIV_FLAG_LLDP_EN_STAT); + if (req->link_stat.port_st_magic == SPEED_VALID_MAGIC) { hw->speed = req->link_stat.st[0].speed; hw->duplex = req->link_stat.st[0].duplex; @@ -1352,8 +1411,8 @@ static inline int rnp_mbx_fw_reply_handler(struct rnp_adapter *adapter, static inline int rnp_rcv_msg_from_fw(struct rnp_adapter *adapter) { - u32 msgbuf[RNP_FW_MAILBOX_SIZE]; struct rnp_hw *hw = &adapter->hw; + u32 msgbuf[RNP_FW_MAILBOX_SIZE]; s32 retval; retval = rnp_read_mbx(hw, msgbuf, RNP_FW_MAILBOX_SIZE, MBX_FW); @@ -1403,8 +1462,8 @@ int rnp_fw_msg_handler(struct rnp_adapter *adapter) **/ int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val) { - struct mbx_fw_cmd_req req; char nr_lane = hw->nr_lane; + struct mbx_fw_cmd_req req; memset(&req, 0, sizeof(req)); build_set_phy_reg(&req, NULL, PHY_EXTERNAL_PHY_MDIO, nr_lane, reg, @@ -1422,9 +1481,9 @@ int rnp_mbx_phy_write(struct rnp_hw *hw, u32 reg, u32 val) **/ int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val) { + char nr_lane = hw->nr_lane; struct mbx_fw_cmd_req req; int err = -EIO; - char nr_lane = hw->nr_lane; memset(&req, 0, sizeof(req)); @@ -1471,8 +1530,8 @@ int rnp_mbx_phy_read(struct rnp_hw *hw, u32 reg, u32 *val) int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, int speed, int duplex, int mdix_ctrl) { - int err; struct mbx_fw_cmd_req req; + int err; memset(&req, 0, sizeof(req)); build_phy_link_set(&req, adv, hw->nr_lane, autoneg, speed, duplex, @@ -1496,8 +1555,8 @@ int rnp_mbx_phy_link_set(struct rnp_hw *hw, int adv, int autoneg, **/ int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode) { - int err; struct mbx_fw_cmd_req req; + int 
err; memset(&req, 0, sizeof(req)); build_phy_pause_set(&req, pause_mode, hw->nr_lane); @@ -1515,10 +1574,10 @@ int rnp_mbx_phy_pause_set(struct rnp_hw *hw, int pause_mode) int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable) { struct mbx_fw_cmd_req req; - int err; int nr_lane = hw->nr_lane; + int err; - if (!hw->fw_lldp_ablity) { + if (!hw->fw_lldp_ability) { rnp_warn("lldp set not supported\n"); return -EOPNOTSUPP; } @@ -1533,11 +1592,11 @@ int rnp_mbx_lldp_port_enable(struct rnp_hw *hw, bool enable) int rnp_mbx_lldp_status_get(struct rnp_hw *hw) { - struct mbx_fw_cmd_req req; struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; int err, ret = 0; - if (!hw->fw_lldp_ablity) { + if (!hw->fw_lldp_ability) { rnp_warn("fw lldp not supported\n"); return -EOPNOTSUPP; } @@ -1572,3 +1631,21 @@ int rnp_mbx_lldp_status_get(struct rnp_hw *hw) } return ret; } + +int rnp_mbx_ddr_csl_enable(struct rnp_hw *hw, + int enable, dma_addr_t dma_phy, + int bytes) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + + memset(&req, 0, sizeof(req)); + + build_ddr_csl(&req, NULL, enable, dma_phy, bytes); + + if (hw->mbx.other_irq_enabled) + return rnp_mbx_write_posted_locked(hw, &req); + + memset(&reply, 0, sizeof(reply)); + return rnp_fw_send_cmd_wait(hw, &req, &reply); +} diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h index bac0e666db4a..bf1298318c6e 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h +++ b/drivers/net/ethernet/mucse/rnp/rnp_mbx_fw.h @@ -61,6 +61,7 @@ enum GENERIC_CMD { SEND_TO_VF = 0x0802, DRIVER_INSMOD = 0x0803, SYSTEM_SUSPUSE = 0x0804, + SYSTEM_FORCE = 0x0805, /* link configuration admin commands */ GET_PHY_ABALITY = 0x0601, @@ -93,6 +94,7 @@ enum GENERIC_CMD { GET_TEMP = 0x0a11, SET_WOL = 0x0a12, LLDP_TX_CTL = 0x0a13, + SET_DDR_CSL = 0xFF11, }; enum link_event_mask { @@ -120,8 +122,22 @@ enum pma_type { PHY_TYPE_40G_BASE_LR4, PHY_TYPE_10G_BASE_LR, PHY_TYPE_10G_BASE_ER, + PHY_TYPE_10G_TP }; +#define PHY_C45 (BIT(30)) +#define PHY_MMD(i) ((i) << 16) +#define PHY_MMD_PMAPMD PHY_MMD(1) +#define PHY_MMD_AN PHY_MMD(7) +#define PHY_MMD_VEND2 PHY_MMD(31) +#define PHY_826x_MDIX (PHY_C45 | PHY_MMD_VEND2 | 0xa430) +#define PHY_826x_SPEED (PHY_C45 | PHY_MMD_PMAPMD | 0) +#define PHY_826x_DUPLEX (PHY_C45 | PHY_MMD_VEND2 | 0xa44) +#define PHY_826x_AN (PHY_C45 | PHY_MMD_AN | 0) +#define PHY_826x_ADV (PHY_C45 | PHY_MMD_AN | 16) +#define PHY_826x_GBASE_ADV (PHY_C45 | PHY_MMD_AN | 0x20) +#define PHY_826x_GBASE_ADV_2 (PHY_C45 | PHY_MMD_VEND2 | 0xa412) + struct phy_abilities { unsigned char link_stat; unsigned char lane_mask; @@ -141,7 +157,7 @@ struct phy_abilities { int wol_status; union { - unsigned int ext_ablity; + unsigned int ext_ability; struct { unsigned int valid : 1; /* 0 */ unsigned int wol_en : 1; /* 1 */ @@ -155,10 +171,16 @@ struct phy_abilities { unsigned int yt8614 : 1; /* 9 */ unsigned int pci_ext_reset : 1; /* 10 */ unsigned int rpu_availble : 1; /* 11 */ - unsigned int fw_lldp_ablity : 1; /* 12 */ + unsigned int fw_lldp_ability : 1; /* 12 */ unsigned int lldp_enabled : 1; /* 13 */ unsigned int only_1g : 1; /* 14 */ - + unsigned int force_down_en : 4; /* 15-18 */ + unsigned int force_link_supported : 1; /* 19 */ + unsigned int ports_is_sgmii_valid : 1; /* [20] */ + unsigned int lane0_is_sgmii : 1; /* [21] */ + unsigned int lane1_is_sgmii : 1; /* [22] */ + unsigned int lane2_is_sgmii : 1; /* [23] */ + unsigned int lane3_is_sgmii : 1; /* [24] */ } e; }; @@ -218,7 +240,7 @@ struct link_stat_data { /* 3:ignore 
*/ char an_completed : 1; - char lp_an_ablity : 1; + char lp_an_ability : 1; char parallel_detection_fault : 1; char fec_enabled : 1; char low_power_state : 1; @@ -263,11 +285,14 @@ struct link_stat_data { struct port_stat { u8 phyid; - u8 duplex : 1; - u8 autoneg : 1; - u8 fec : 1; - u16 speed; - u16 pause; + u8 duplex : 1; + u8 autoneg : 1; + u8 fec : 1; + u8 rev : 1; + u8 link_traing : 1; + u8 is_sgmii : 1; + u8 lldp_status : 1; + u32 speed; } __attribute__((packed)); struct lane_stat_data { @@ -415,6 +440,11 @@ struct mbx_fw_cmd_req { int status; } ifsuspuse; + struct { + int lane; + int status; + } ifforce; + struct { int nr_lane; } get_lane_st; @@ -553,6 +583,13 @@ struct mbx_fw_cmd_req { int pfvf_num; } get_mac_addr; + struct { + int enable; + int ddr_phy_hi; + int ddr_phy_lo; + int bytes; + } ddr_csl; + struct { char phy_interface; union { @@ -1024,6 +1061,19 @@ static inline void build_ifsuspuse(struct mbx_fw_cmd_req *req, req->ifinsmod.status = status; } +static inline void build_ifforce(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, int status) +{ + req->flags = 0; + req->opcode = SYSTEM_FORCE; + req->datalen = sizeof(req->ifforce); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifforce.lane = nr_lane; + req->ifforce.status = status; +} + static inline void build_mbx_sfp_read(struct mbx_fw_cmd_req *req, unsigned int nr_lane, int sfp_addr, int reg, int cnt, void *cookie) @@ -1106,6 +1156,30 @@ build_link_set_loopback_req(struct mbx_fw_cmd_req *req, void *cookie, } } +static inline void build_ddr_csl(struct mbx_fw_cmd_req *req, + void *cookie, bool enable, + dma_addr_t dma_phy, int bytes) +{ + u64 address = dma_phy; + + req->flags = 0; + req->opcode = SET_DDR_CSL; + req->datalen = sizeof(req->ddr_csl); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + + req->ddr_csl.enable = enable; + + if (enable) { + req->ddr_csl.bytes = bytes; + req->ddr_csl.ddr_phy_hi = (address >> 32); + req->ddr_csl.ddr_phy_lo = address & 0xffffffff; + } else { + req->ddr_csl.bytes = 0; + } +} + /* =========== errcode======= */ enum MBX_ERR { MBX_OK = 0, @@ -1153,6 +1227,7 @@ int rnp_set_lane_fun(struct rnp_hw *hw, int fun, int value0, int value1, int rnp_mbx_ifinsmod(struct rnp_hw *hw, int status); int rnp_mbx_ifsuspuse(struct rnp_hw *hw, int status); int rnp_mbx_ifup_down(struct rnp_hw *hw, int up); +int rnp_mbx_ifforce_control_mac(struct rnp_hw *hw, int status); int rnp_mbx_led_set(struct rnp_hw *hw, int value); int rnp_mbx_get_capability(struct rnp_hw *hw, struct rnp_info *info); int rnp_mbx_get_temp(struct rnp_hw *hw, int *voltage); diff --git a/drivers/net/ethernet/mucse/rnp/rnp_mpe.c b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c index be6334f2bb18..1216b19d1241 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_mpe.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_mpe.c @@ -37,7 +37,7 @@ do { \ int i; \ for (i = 0; i < size; i++) { \ - rnp_wr_reg(((char *)(rpubase)) + (offset) + \ + rnp_wr_reg((rpubase) + (offset) + \ i * 4, \ (array)[i]); \ } \ @@ -61,7 +61,7 @@ static void rnp_reset_mpe_and_rpu(struct rnp_hw *hw) mdelay(100); } -static void rnp_start_rpu(char *rpu_base, int do_start) +static void rnp_start_rpu(u8 __iomem *rpu_base, int do_start) { int mpe_start_v = 0xff, rpu_start_v = 0x1; @@ -77,7 +77,8 @@ static void rnp_start_rpu(char *rpu_base, int do_start) } /* down bin to rpu */ -static int rnp_download_and_start_rpu(struct rnp_hw *hw, char *rpu_base, +static int rnp_download_and_start_rpu(struct rnp_hw *hw, + u8 __iomem *rpu_base, const unsigned int 
*mpe_bin, const int mpe_bin_sz, const unsigned int *mpe_data, diff --git a/drivers/net/ethernet/mucse/rnp/rnp_n10.c b/drivers/net/ethernet/mucse/rnp/rnp_n10.c index 228bffde15c5..6ec650b67114 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_n10.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_n10.c @@ -121,10 +121,10 @@ static struct rnp_dma_operations dma_ops_n10 = { static s32 rnp_eth_set_rar_n10(struct rnp_eth_info *eth, u32 index, u8 *addr, bool enable_addr) { - u32 mcstctrl; - u32 rar_low, rar_high = 0; u32 rar_entries = eth->num_rar_entries; struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u32 rar_low, rar_high = 0; + u32 mcstctrl; /* Make sure we are using a valid rar index range */ if (index >= (rar_entries + hw->ncsi_rar_entries)) { @@ -173,8 +173,8 @@ static s32 rnp_eth_set_rar_n10(struct rnp_eth_info *eth, u32 index, u8 *addr, **/ static s32 rnp_eth_clear_rar_n10(struct rnp_eth_info *eth, u32 index) { - u32 rar_high; u32 rar_entries = eth->num_rar_entries; + u32 rar_high; /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { @@ -280,10 +280,10 @@ static s32 rnp10_mta_vector(struct rnp_eth_info *eth, u8 *mc_addr) static void rnp10_set_mta(struct rnp_hw *hw, u8 *mc_addr) { - u32 vector; + struct rnp_eth_info *eth = &hw->eth; u32 vector_bit; u32 vector_reg; - struct rnp_eth_info *eth = &hw->eth; + u32 vector; hw->addr_ctrl.mta_in_use++; vector = rnp10_mta_vector(eth, mc_addr); @@ -305,9 +305,9 @@ static void rnp10_set_mta(struct rnp_hw *hw, u8 *mc_addr) static void rnp10_set_vf_mta(struct rnp_hw *hw, u16 vector) { + struct rnp_eth_info *eth = &hw->eth; u32 vector_bit; u32 vector_reg; - struct rnp_eth_info *eth = &hw->eth; hw->addr_ctrl.mta_in_use++; vector_reg = (vector >> 5) & 0x7F; @@ -352,12 +352,12 @@ static s32 rnp_eth_update_mc_addr_list_n10(struct rnp_eth_info *eth, bool sriov_on) { struct rnp_hw *hw = (struct rnp_hw *)eth->back; + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; struct netdev_hw_addr *ha; + u8 *addr_list = NULL; + int addr_count = 0; u32 i; u32 v; - int addr_count = 0; - u8 *addr_list = NULL; - struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; /* Set the new number of MC addresses that we are being requested to * use. @@ -446,13 +446,13 @@ static void rnp_eth_clr_mc_addr_n10(struct rnp_eth_info *eth) **/ static void rnp_eth_update_rss_key_n10(struct rnp_eth_info *eth, bool sriov_flag) { + u32 iov_en = (sriov_flag) ? RNP10_IOV_ENABLED : 0; struct rnp_hw *hw = (struct rnp_hw *)eth->back; - int i; - u8 *key_temp; int key_len = RNP_RSS_KEY_SIZE; u8 *key = hw->rss_key; + u8 *key_temp; u32 *value; - u32 iov_en = (sriov_flag) ? 
RNP10_IOV_ENABLED : 0; + int i; key_temp = kmalloc(key_len, GFP_KERNEL); /* reoder the key */ @@ -502,11 +502,11 @@ static void rnp_eth_update_rss_table_n10(struct rnp_eth_info *eth) **/ static s32 rnp_eth_set_vfta_n10(struct rnp_eth_info *eth, u32 vlan, bool vlan_on) { + bool vfta_changed = false; + u32 targetbit; s32 regindex; u32 bitindex; u32 vfta; - u32 targetbit; - bool vfta_changed = false; if (vlan > 4095) return RNP_ERR_PARAM; @@ -620,9 +620,9 @@ static u16 rnp_tuple5_pritologic_n10(u16 hw_id) static u16 rnp_tuple5_pritologic_tcam_n10(u16 pri_id) { - int i; int hw_id = 0; int step = 32; + int i; for (i = 0; i < pri_id; i++) { hw_id += step; @@ -655,22 +655,22 @@ static void rnp_eth_set_tuple5_n10(struct rnp_eth_info *eth, dbg("try to eable tuple 5 %x\n", hw_id); if (input->formatted.src_ip[0] != 0) { eth_wr32(eth, RNP10_ETH_TUPLE5_SAQF(hw_id), - htonl(input->formatted.src_ip[0])); + ntohl(input->formatted.src_ip[0])); } else { mask_temp |= RNP10_SRC_IP_MASK; } if (input->formatted.dst_ip[0] != 0) { eth_wr32(eth, RNP10_ETH_TUPLE5_DAQF(hw_id), - htonl(input->formatted.dst_ip[0])); + ntohl(input->formatted.dst_ip[0])); } else { mask_temp |= RNP10_DST_IP_MASK; } if (input->formatted.src_port != 0) - port |= (htons(input->formatted.src_port)); + port |= (ntohs(input->formatted.src_port)); else mask_temp |= RNP10_SRC_PORT_MASK; if (input->formatted.dst_port != 0) - port |= (htons(input->formatted.dst_port) << 16); + port |= (ntohs(input->formatted.dst_port) << 16); else mask_temp |= RNP10_DST_PORT_MASK; @@ -733,31 +733,31 @@ static void rnp_eth_set_tuple5_n10(struct rnp_eth_info *eth, eth_wr32(eth, RNP10_TCAM_MODE, 2); if (input->formatted.src_ip[0] != 0) { eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), - htonl(input->formatted.src_ip[0])); + ntohl(input->formatted.src_ip[0])); eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), - htonl(input->formatted.src_ip_mask[0])); + ntohl(input->formatted.src_ip_mask[0])); } else { eth_wr32(eth, RNP10_TCAM_SAQF(hw_id), 0); eth_wr32(eth, RNP10_TCAM_SAQF_MASK(hw_id), 0); } if (input->formatted.dst_ip[0] != 0) { eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), - htonl(input->formatted.dst_ip[0])); + ntohl(input->formatted.dst_ip[0])); eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), - htonl(input->formatted.dst_ip_mask[0])); + ntohl(input->formatted.dst_ip_mask[0])); } else { eth_wr32(eth, RNP10_TCAM_DAQF(hw_id), 0); eth_wr32(eth, RNP10_TCAM_DAQF_MASK(hw_id), 0); } if (input->formatted.src_port != 0) { - port |= (htons(input->formatted.src_port) << 16); - port_mask |= (htons(input->formatted.src_port_mask) + port |= (ntohs(input->formatted.src_port) << 16); + port_mask |= (ntohs(input->formatted.src_port_mask) << 16); } if (input->formatted.dst_port != 0) { - port |= (htons(input->formatted.dst_port)); + port |= (ntohs(input->formatted.dst_port)); port_mask |= - (htons(input->formatted.dst_port_mask)); + (ntohs(input->formatted.dst_port_mask)); } /* setup src & dst port */ @@ -819,8 +819,8 @@ static void rnp_eth_set_tuple5_n10(struct rnp_eth_info *eth, static void rnp_eth_clr_tuple5_n10(struct rnp_eth_info *eth, u16 pri_id) { - u16 hw_id; struct rnp_hw *hw = (struct rnp_hw *)eth->back; + u16 hw_id; if (hw->fdir_mode != fdir_mode_tcam) { hw_id = rnp_tuple5_pritologic_n10(pri_id); @@ -896,8 +896,8 @@ static void rnp_eth_set_vlan_strip_n10(struct rnp_eth_info *eth, u16 queue, bool enable) { u32 reg = RNP10_ETH_VLAN_VME_REG(queue / 32); - u32 offset = queue % 32; u32 data = eth_rd32(eth, reg); + u32 offset = queue % 32; if (enable) data |= (1 << offset); @@ -996,9 +996,9 @@ static void 
rnp_eth_set_vf_vlan_mode_n10(struct rnp_eth_info *eth, static int __get_ncsi_shm_info(struct rnp_hw *hw, struct ncsi_shm_info *ncsi_shm) { - int i; - int *ptr = (int *)ncsi_shm; int rbytes = round_up(sizeof(*ncsi_shm), 4); + int *ptr = (int *)ncsi_shm; + int i; memset(ncsi_shm, 0, sizeof(*ncsi_shm)); for (i = 0; i < (rbytes / 4); i++) @@ -1010,8 +1010,8 @@ static int __get_ncsi_shm_info(struct rnp_hw *hw, static void rnp_ncsi_set_uc_addr_n10(struct rnp_eth_info *eth) { - struct ncsi_shm_info ncsi_shm; struct rnp_hw *hw = (struct rnp_hw *)eth->back; + struct ncsi_shm_info ncsi_shm; u8 mac[ETH_ALEN]; @@ -1036,10 +1036,10 @@ static void rnp_ncsi_set_uc_addr_n10(struct rnp_eth_info *eth) static void rnp_ncsi_set_mc_mta_n10(struct rnp_eth_info *eth) { - struct ncsi_shm_info ncsi_shm; struct rnp_hw *hw = (struct rnp_hw *)eth->back; - u8 i; + struct ncsi_shm_info ncsi_shm; u8 mac[ETH_ALEN]; + u8 i; if (!hw->ncsi_en) return; @@ -1063,8 +1063,8 @@ static void rnp_ncsi_set_mc_mta_n10(struct rnp_eth_info *eth) static void rnp_ncsi_set_vfta_n10(struct rnp_eth_info *eth) { - struct ncsi_shm_info ncsi_shm; struct rnp_hw *hw = (struct rnp_hw *)eth->back; + struct ncsi_shm_info ncsi_shm; if (!hw->ncsi_en) return; @@ -1146,9 +1146,9 @@ static s32 rnp_get_permtion_mac_addr_n10(struct rnp_hw *hw, u8 *mac_addr) static s32 rnp_reset_hw_ops_n10(struct rnp_hw *hw) { - int i; struct rnp_dma_info *dma = &hw->dma; struct rnp_eth_info *eth = &hw->eth; + int i; /* Call adapter stop to disable tx/rx and clear interrupts */ dma_wr32(dma, RNP_DMA_AXI_EN, 0); @@ -1188,9 +1188,7 @@ static s32 rnp_reset_hw_ops_n10(struct rnp_hw *hw) rnp_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0); /* setup pause reg if is_sgmii */ - if (hw->phy_type != PHY_TYPE_SGMII) - goto out; - { + if (hw->phy_type == PHY_TYPE_SGMII) { u16 pause_bits = 0; u32 value; @@ -1213,15 +1211,15 @@ static s32 rnp_reset_hw_ops_n10(struct rnp_hw *hw) value |= pause_bits; rnp_mbx_phy_write(hw, 4, value); } -out: + return 0; } static s32 rnp_start_hw_ops_n10(struct rnp_hw *hw) { - s32 ret_val = 0; struct rnp_eth_info *eth = &hw->eth; struct rnp_dma_info *dma = &hw->dma; + s32 ret_val = 0; /* ETH Registers */ eth_wr32(eth, RNP10_ETH_ERR_MASK_VECTOR, @@ -1319,9 +1317,9 @@ static void rnp_set_vlan_strip_hw_ops_n10(struct rnp_hw *hw, u16 queue, static void rnp_set_mac_hw_ops_n10(struct rnp_hw *hw, u8 *mac, bool sriov_flag) { + struct rnp_mac_info *mac_info = &hw->mac; struct rnp_eth_info *eth = &hw->eth; struct rnp_dma_info *dma = &hw->dma; - struct rnp_mac_info *mac_info = &hw->mac; /* use this queue index to setup veb */ /* now pf use queu 0 /1 */ /* vfnum is the last vfnum */ @@ -1357,8 +1355,8 @@ static int rnp_write_uc_addr_list_n10(struct rnp_hw *hw, bool sriov_flag) { unsigned int rar_entries = hw->num_rar_entries - 1; - u32 vfnum = hw->vfnum; struct rnp_eth_info *eth = &hw->eth; + u32 vfnum = hw->vfnum; int count = 0; if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) @@ -1410,11 +1408,11 @@ static void rnp_set_rx_mode_hw_ops_n10(struct rnp_hw *hw, bool sriov_flag) { struct rnp_adapter *adapter = netdev_priv(netdev); - u32 fctrl, value; netdev_features_t features = netdev->features; - int count; struct rnp_eth_info *eth = &hw->eth; struct rnp_mac_info *mac = &hw->mac; + u32 fctrl, value; + int count; /* broadcast always bypass */ fctrl = eth_rd32(eth, RNP10_ETH_DMAC_FCTRL) | RNP10_FCTRL_BPE; @@ -1493,8 +1491,8 @@ static void rnp_clr_rar_hw_ops_n10(struct rnp_hw *hw, int idx) static void rnp_clr_rar_all_hw_ops_n10(struct rnp_hw *hw) { - struct rnp_eth_info 
*eth = &hw->eth; unsigned int rar_entries = hw->num_rar_entries - 1; + struct rnp_eth_info *eth = &hw->eth; int i; for (i = 0; i < rar_entries; i++) @@ -1662,9 +1660,9 @@ static void rnp_get_pause_mode_hw_ops_n10(struct rnp_hw *hw) static void rnp_update_hw_info_hw_ops_n10(struct rnp_hw *hw) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; struct rnp_dma_info *dma = &hw->dma; struct rnp_eth_info *eth = &hw->eth; - struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; u32 data; /* 1 enable eth filter */ eth_wr32(eth, RNP10_HOST_FILTER_EN, 1); @@ -1700,8 +1698,8 @@ static void rnp_update_hw_info_hw_ops_n10(struct rnp_hw *hw) static void rnp_update_hw_rx_drop_hw_ops_n10(struct rnp_hw *hw) { struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; - int i; struct rnp_ring *ring; + int i; for (i = 0; i < adapter->num_rx_queues; i++) { ring = adapter->rx_ring[i]; @@ -1807,8 +1805,8 @@ static void rnp_set_txvlan_mode_hw_ops_n10(struct rnp_hw *hw, bool cvlan) static void rnp_set_rss_key_hw_ops_n10(struct rnp_hw *hw, bool sriov_flag) { - struct rnp_eth_info *eth = &hw->eth; struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + struct rnp_eth_info *eth = &hw->eth; int key_len = RNP_RSS_KEY_SIZE; memcpy(hw->rss_key, adapter->rss_key, key_len); @@ -1830,7 +1828,18 @@ static void rnp_set_mbx_link_event_hw_ops_n10(struct rnp_hw *hw, static void rnp_set_mbx_ifup_hw_ops_n10(struct rnp_hw *hw, int enable) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + rnp_mbx_ifup_down(hw, enable); + + if (hw->phy_type != PHY_TYPE_10G_TP) + return; + /* first call reset an */ + if (enable) { + hw->ops.setup_link(hw, hw->phy.autoneg_advertised, + hw->autoneg, adapter->speed, + hw->duplex); + } } /** @@ -1872,9 +1881,9 @@ static s32 rnp_setup_mac_link_hw_ops_n10(struct rnp_hw *hw, u32 adv, u32 autoneg u32 speed, u32 duplex) { struct rnp_adapter *adpt = hw->back; - u32 value = 0; u32 value_r4 = 0; u32 value_r9 = 0; + u32 value = 0; rnp_logd(LOG_PHY, "%s setup phy: phy_addr=%d speed=%d", @@ -1887,13 +1896,117 @@ static s32 rnp_setup_mac_link_hw_ops_n10(struct rnp_hw *hw, u32 adv, u32 autoneg if (hw->is_backplane) return rnp_set_lane_fun(hw, LANE_FUN_AN, autoneg, 0, 0, 0); - if (!hw->is_sgmii) { - if (hw->force_10g_1g_speed_ablity) + if (!hw->is_sgmii && hw->phy_type != PHY_TYPE_10G_TP) { + if (hw->force_10g_1g_speed_ability) return rnp_mbx_force_speed(hw, speed); else return 0; } + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, PHY_826x_MDIX, &value); + + value &= ~(BIT(8) | BIT(9)); + /* Options: 0: Auto (default) 1: MDI mode 2: MDI-X mode */ + switch (hw->phy.mdix) { + case 1: + value |= BIT(8) | BIT(9); + break; + case 2: + value |= BIT(9); + break; + case 0: + default: + break; + } + rnp_mbx_phy_write(hw, PHY_826x_MDIX, value); + + if (!autoneg) { + rnp_mbx_phy_read(hw, PHY_826x_SPEED, &value); + value &= (~(BIT(13) | BIT(6) | BIT(5) | BIT(4) | + BIT(3) | BIT(2))); + + switch (speed) { + case RNP_LINK_SPEED_10GB_FULL: + value |= BIT(13) | BIT(6); + break; + case RNP_LINK_SPEED_1GB_FULL: + case RNP_LINK_SPEED_1GB_HALF: + value |= BIT(6); + ; + break; + case RNP_LINK_SPEED_100_FULL: + case RNP_LINK_SPEED_100_HALF: + value |= BIT(13); + break; + case RNP_LINK_SPEED_10_FULL: + case RNP_LINK_SPEED_10_HALF: + value = 0; + break; + default: + hw_dbg(hw, "unknown speed = 0x%x.\n", speed); + break; + } + rnp_mbx_phy_write(hw, PHY_826x_SPEED, value); + rnp_mbx_phy_read(hw, PHY_826x_DUPLEX, &value); + value &= (~BIT(8)); + if (duplex) + value |= BIT(8); + 
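For the forced-speed branch of the 10G-TP setup above, a hypothetical helper that reproduces the bit mapping written into PHY_826x_SPEED (10G sets bits 13 and 6, 1G bit 6, 100M bit 13, 10M clears both); illustration only, not driver code:

/* Assumed encoding, mirroring the switch statement in the hunk above. */
static unsigned int phy826x_speed_bits(unsigned int mbps)
{
        switch (mbps) {
        case 10000:
                return (1u << 13) | (1u << 6);
        case 1000:
                return 1u << 6;
        case 100:
                return 1u << 13;
        default:
                return 0; /* 10 Mb/s and anything unrecognized */
        }
}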
rnp_mbx_phy_write(hw, PHY_826x_DUPLEX, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value &= (~BIT(12)); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } else { + rnp_mbx_phy_read(hw, PHY_826x_ADV, &value); + + value &= (~(BIT(5) | BIT(6) | BIT(7) | BIT(8) | + BIT(10) | BIT(11))); + + if (adv & RNP_LINK_SPEED_100_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(8); + } + if (adv & RNP_LINK_SPEED_100_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_100_FULL; + value |= BIT(7); + } + + value |= BIT(10) | BIT(11); + /* BIT10 fc BIT11 asyfc */ + rnp_mbx_phy_write(hw, PHY_826x_ADV, value); + + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV, &value); + value &= (~(BIT(7) | BIT(8) | BIT(12))); + + /* bit 7 2.5G bit 8 5G */ + if (adv & RNP_LINK_SPEED_10GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_10GB_FULL; + value |= BIT(12); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV, value); + rnp_mbx_phy_read(hw, PHY_826x_GBASE_ADV_2, &value); + value &= 0x00ff; + if (adv & RNP_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_FULL; + value |= BIT(9); + } + if (adv & RNP_LINK_SPEED_1GB_HALF) { + hw->phy.autoneg_advertised |= + RNP_LINK_SPEED_1GB_HALF; + value |= BIT(8); + } + rnp_mbx_phy_write(hw, PHY_826x_GBASE_ADV_2, value); + rnp_mbx_phy_read(hw, PHY_826x_AN, &value); + value |= BIT(12) | BIT(9); + rnp_mbx_phy_write(hw, PHY_826x_AN, value); + } + + return 0; + } /* Set MDI/MDIX mode */ rnp_mbx_phy_read(hw, RNP_YT8531_PHY_SPEC_CTRL, &value); value &= ~RNP_YT8531_PHY_SPEC_CTRL_MDIX_CFG_MASK; @@ -2057,8 +2170,8 @@ static void rnp_set_tcp_sync_hw_ops_n10(struct rnp_hw *hw, int queue, static void rnp_update_msix_count_hw_ops_n10(struct rnp_hw *hw, int msix_count) { - int msix_count_new; struct rnp_mac_info *mac = &hw->mac; + int msix_count_new; msix_count_new = clamp_t(int, msix_count, 2, RNP_N10_MSIX_VECTORS); mac->max_msix_vectors = msix_count_new; @@ -2090,6 +2203,8 @@ rnp_update_hw_status_hw_ops_n10(struct rnp_hw *hw, struct rnp_dma_info *dma = &hw->dma; struct rnp_eth_info *eth = &hw->eth; struct rnp_mac_info *mac = &hw->mac; + u64 rx_crc_errors = 0; + u64 rx_errors = 0; int port; hw_stats->dma_to_dma = @@ -2102,17 +2217,14 @@ rnp_update_hw_status_hw_ops_n10(struct rnp_hw *hw, dma_rd32(dma, RNP_DMA_STATS_DMA_TO_SWITCH); hw_stats->mac_to_dma = dma_rd32(dma, RNP_DMA_STATS_MAC_TO_DMA); - net_stats->rx_crc_errors = 0; - net_stats->rx_errors = 0; - hw_stats->mac_rx_csum_err = 0; for (port = 0; port < 4; port++) { - /* we use Hardware stats? 
*/ - net_stats->rx_crc_errors += + /* we use Hardware stats */ + rx_crc_errors += eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)); - net_stats->rx_errors += + rx_errors += eth_rd32(eth, RNP10_RXTRANS_WDT_ERR_PKTS(port)) + eth_rd32(eth, RNP10_RXTRANS_CODE_ERR_PKTS(port)) + eth_rd32(eth, RNP10_RXTRANS_CRC_ERR_PKTS(port)) + @@ -2165,6 +2277,9 @@ rnp_update_hw_status_hw_ops_n10(struct rnp_hw *hw, hw_stats->mac_tx_pause_count += ((u64)mac_rd32(mac, RNP10_MAC_STATS_TX_PAUSE_COUNT_HIGH) << 32); + net_stats->rx_crc_errors = rx_crc_errors; + net_stats->rx_errors = rx_errors; + } enum n10_priv_bits { @@ -2189,16 +2304,17 @@ static const char rnp10_priv_flags_strings[][ETH_GSTRING_LEN] = { #define RNP10_SRIOV_VLAN_MODE BIT(10) #define RNP10_REMAP_MODE BIT(11) #define RNP10_LLDP_EN_STAT BIT(12) +#define RNP10_FORCE_CLOSE BIT(13) "mac_loopback", "switch_loopback", "veb_enable", "pcie_patch", "padding_debug", "ptp_performance_debug", "simulate_link_down", "vxlan_inner_match", "stag_enable", "mask_len_err", "sriov_vlan_mode", "remap_mode1", - "lldp_en", + "lldp_en", "link_down_on_close", }; #define RNP10_PRIV_FLAGS_STR_LEN ARRAY_SIZE(rnp10_priv_flags_strings) -const struct rnp_stats rnp10_gstrings_net_stats[] = { +static const struct rnp_stats rnp10_gstrings_net_stats[] = { RNP_NETDEV_STAT(rx_packets), RNP_NETDEV_STAT(tx_packets), RNP_NETDEV_STAT(rx_bytes), @@ -2294,12 +2410,13 @@ static int rnp_set_autoneg_adv_from_hw(struct rnp_hw *hw, struct ethtool_link_ksettings *ks) { u32 value_r0 = 0, value_r4 = 0, value_r9 = 0; + u32 value_r20, value_r412; /* Read autoneg state from phy */ if (hw->phy_type == PHY_TYPE_SGMII) { rnp_mbx_phy_read(hw, 0x0, &value_r0); /* Not support AN, return directly */ - if (!(value_r0 & BIT(12)) || !hw->link) + if (!(value_r0 & BIT(12))) return 0; rnp_mbx_phy_read(hw, 0x4, &value_r4); @@ -2330,6 +2447,29 @@ static int rnp_set_autoneg_adv_from_hw(struct rnp_hw *hw, } } + if (hw->phy_type == PHY_TYPE_10G_TP) { + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x0), &value_r0); + + if (!(value_r0 & BIT(12))) + return 0; + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD(7) | 0x20), &value_r20); + + if (value_r20 & BIT(12)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + + rnp_mbx_phy_read(hw, (PHY_C45 | PHY_MMD_VEND2 | 0xa412), + &value_r412); + + if (value_r412 & BIT(8)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + if (value_r412 & BIT(9)) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } + return 0; } @@ -2392,6 +2532,15 @@ static void rnp_phy_type_to_ethtool(struct rnp_adapter *adapter, rnp_set_autoneg_adv_from_hw(hw, ks); } + if (phy_type == PHY_TYPE_10G_TP) { + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + rnp_set_autoneg_adv_from_hw(hw, ks); + } + if (rnp_fw_is_old_ethtool(hw) && (supported_link & RNP_LINK_SPEED_40GB_FULL)) { supported_link |= RNP_SFP_MODE_40G_CR4 | @@ -2683,7 +2832,18 @@ static void rnp_get_settings_link_up(struct rnp_hw *hw, ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); break; - + case PHY_TYPE_10G_TP: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + 
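The statistics hunk above switches to accumulating the per-port error counters into locals and publishing them once; a small stand-alone sketch of that pattern (reads are stubbed, names are illustrative):

#include <stdint.h>

static void update_rx_error_stats(uint64_t *out_crc, uint64_t *out_err,
                                  uint64_t (*read_crc)(int port),
                                  uint64_t (*read_err)(int port))
{
        uint64_t crc = 0, err = 0;
        int port;

        for (port = 0; port < 4; port++) {
                crc += read_crc(port);
                err += read_err(port);
        }
        /* single store at the end: readers of the public counters never
         * see them zeroed and partially re-added */
        *out_crc = crc;
        *out_err = err;
}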
ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; default: /* if we got here and link is up something bad */ netdev_info(netdev, @@ -2730,7 +2890,8 @@ static void rnp_get_settings_link_down(struct rnp_hw *hw, ks->base.duplex = DUPLEX_UNKNOWN; /* if copper we should adv mdix info */ - if (hw->phy_type == PHY_TYPE_SGMII) { + if (hw->phy_type == PHY_TYPE_SGMII || + hw->phy_type == PHY_TYPE_10G_TP) { ks->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; ks->base.eth_tp_mdix_ctrl = hw->tp_mdix_ctrl; } @@ -2746,8 +2907,8 @@ static void rnp_get_settings_link_down(struct rnp_hw *hw, static int rnp_set_autoneg_state_from_hw(struct rnp_hw *hw, struct ethtool_link_ksettings *ks) { - int ret; struct rnp_adapter *adapter = hw->back; + int ret; ks->base.autoneg = (adapter->an ? AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -2764,13 +2925,25 @@ static int rnp_set_autoneg_state_from_hw(struct rnp_hw *hw, AUTONEG_DISABLE; } + if (hw->phy_type == PHY_TYPE_10G_TP) { + u32 value_r0 = 0; + + rnp_mbx_phy_read(hw, PHY_826x_AN, &value_r0); + + ks->base.autoneg = (value_r0 & BIT(12)) ? AUTONEG_ENABLE : + AUTONEG_DISABLE; + if (value_r0) + adapter->an = 1; + } + return 0; } static int rnp_get_phy_mdix_from_hw(struct rnp_hw *hw) { - int ret; u32 value_r17 = 0; + int rmmd_reg = 0; + int ret; if (hw->phy_type == PHY_TYPE_SGMII) { ret = rnp_mbx_phy_read(hw, 0x11, &value_r17); @@ -2779,6 +2952,14 @@ static int rnp_get_phy_mdix_from_hw(struct rnp_hw *hw) hw->phy.is_mdix = !!(value_r17 & 0x0040); } + if (hw->phy_type == PHY_TYPE_10G_TP) { + rmmd_reg = (1 << 30) | (0x1f << 16) | (0xa430 & 0xffff); + ret = rnp_mbx_phy_read(hw, rmmd_reg, &value_r17); + if (ret) + return -1; + hw->phy.is_mdix = !!(value_r17 & 0x0200); + } + return 0; } @@ -2847,6 +3028,7 @@ static int rnp10_get_link_ksettings(struct net_device *netdev, } break; case PHY_TYPE_SGMII: + case PHY_TYPE_10G_TP: hw->phy.media_type = rnp_media_type_copper; ks->base.phy_address = adapter->phy_addr; break; @@ -2979,7 +3161,8 @@ static int rnp10_get_link_ksettings(struct net_device *netdev, #ifdef ETH_TP_MDI_X /* MDI-X => 2; MDI =>1; Invalid =>0 */ - if (hw->phy_type == PHY_TYPE_SGMII) { + if (hw->phy_type == PHY_TYPE_SGMII || + hw->phy_type == PHY_TYPE_10G_TP) { if (rnp_get_phy_mdix_from_hw(hw)) { ks->base.eth_tp_mdix = ETH_TP_MDI_INVALID; } else { @@ -2987,6 +3170,8 @@ static int rnp10_get_link_ksettings(struct net_device *netdev, ETH_TP_MDI_X : ETH_TP_MDI; } + } else { + ks->base.eth_tp_mdix = hw->tp_mdx; } #ifdef ETH_TP_MDI_AUTO @@ -3012,13 +3197,13 @@ static int rnp10_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *ks) { struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; struct ethtool_link_ksettings safe_ks; struct ethtool_link_ksettings copy_ks; + u32 advertising_link_speed, speed = 0; + struct rnp_hw *hw = &adapter->hw; int timeout = 50; int err = 0; u8 autoneg; - u32 advertising_link_speed, speed = 0; /* copy the ksettings to copy_ks to avoid modifying the origin */ memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); @@ -3129,6 +3314,7 @@ static int rnp10_set_link_ksettings(struct net_device *netdev, goto done; } } + hw->advertised_link = advertising_link_speed; hw->autoneg = true; } else { @@ -3147,6 +3333,13 @@ static int rnp10_set_link_ksettings(struct net_device *netdev, goto done; } } + /* if 10G -TP, not support close an */ + if (hw->phy_type == PHY_TYPE_10G_TP) { + netdev_info(netdev, + 
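A sketch of the indirect PHY address used for the 10G-TP MDI-X readback above (rmmd_reg): the low 16 bits carry the register, bits 20:16 the MMD device address, and bit 30 appears to mark a Clause-45 access, matching the PHY_C45/PHY_MMD() usage elsewhere in this patch; the helper name is hypothetical:

static unsigned int c45_addr_sketch(unsigned int mmd, unsigned int reg)
{
        return (1u << 30) | ((mmd & 0x1f) << 16) | (reg & 0xffff);
}

/* c45_addr_sketch(0x1f, 0xa430) reproduces the rmmd_reg value built above;
 * the driver then reads bit 9 of that register as the MDI/MDI-X state. */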
"Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + goto done; + } /* Only allow one speed at a time when autoneg is AUTONEG_DISABLE. */ switch (ks->base.speed) { @@ -3179,6 +3372,9 @@ static int rnp10_set_link_ksettings(struct net_device *netdev, if (hw->is_sgmii) hw->duplex = ks->base.duplex; + if (hw->phy_type == PHY_TYPE_10G_TP) + hw->duplex = ks->base.duplex; + /* this sets the link speed and restarts auto-neg */ while (test_and_set_bit(__RNP_IN_SFP_INIT, &adapter->state)) { timeout--; @@ -3215,14 +3411,14 @@ static void rnp10_get_drvinfo(struct net_device *netdev, struct rnp_hw *hw = &adapter->hw; strscpy(drvinfo->driver, rnp_driver_name, sizeof(drvinfo->driver)); - snprintf(drvinfo->version, sizeof(drvinfo->version), "%s-%x", - rnp_driver_version, hw->pcode); + snprintf(drvinfo->version, sizeof(drvinfo->version), "%s", + rnp_driver_version); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d.%d 0x%08x", ((char *)&hw->fw_version)[3], - ((char *)&hw->fw_version)[2], - ((char *)&hw->fw_version)[1], - ((char *)&hw->fw_version)[0], hw->bd_uid); + "%d.%d.%d.%d", ((unsigned char *)&hw->fw_version)[3], + ((unsigned char *)&hw->fw_version)[2], + ((unsigned char *)&hw->fw_version)[1], + ((unsigned char *)&hw->fw_version)[0]); strscpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); @@ -3561,6 +3757,8 @@ static u32 rnp10_get_priv_flags(struct net_device *netdev) priv_flags |= RNP10_REMAP_MODE; if (adapter->priv_flags & RNP_PRIV_FLAG_LLDP_EN_STAT) priv_flags |= RNP10_LLDP_EN_STAT; + if (adapter->priv_flags & RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) + priv_flags |= RNP10_FORCE_CLOSE; return priv_flags; } @@ -3705,6 +3903,31 @@ static int rnp10_set_priv_flags(struct net_device *netdev, u32 priv_flags) hw->ops.set_vf_vlan_mode(hw, 0, i, false); } } + + if (hw->force_link_supported) { + if (priv_flags & RNP10_FORCE_CLOSE) { + if (!(adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE)) { + adapter->priv_flags |= + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE; + hw->ops.driver_status(hw, true, + rnp_driver_force_control_mac); + } + } else { + if (adapter->priv_flags & + RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE) { + adapter->priv_flags &= + (~RNP_PRIV_FLAG_LINK_DOWN_ON_CLOSE); + hw->ops.driver_status(hw, false, + rnp_driver_force_control_mac); + } + } + } else { + if (priv_flags & RNP10_FORCE_CLOSE) + rnp_err("%s: firmware not support set `link_down_on_close` private flag\n", + adapter->netdev->name); + } + skip_setup_vf_vlan: if (data_old != data_new) @@ -3721,11 +3944,11 @@ static int rnp10_set_priv_flags(struct net_device *netdev, u32 priv_flags) static void rnp10_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { - struct rnp_adapter *adapter = netdev_priv(netdev); struct net_device_stats *net_stats = &netdev->stats; + struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_ring *ring; - int i, j; char *p = NULL; + int i, j; rnp_update_stats(adapter); @@ -3948,8 +4171,8 @@ static void rnp_set_ethtool_hw_ops_n10(struct net_device *netdev) **/ static s32 rnp_get_thermal_sensor_data_hw_ops_n10(struct rnp_hw *hw) { - int voltage = 0; struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + int voltage = 0; data->sensor[0].temp = rnp_mbx_get_temp(hw, &voltage); @@ -3964,8 +4187,8 @@ static s32 rnp_get_thermal_sensor_data_hw_ops_n10(struct rnp_hw *hw) **/ static s32 rnp_init_thermal_sensor_thresh_hw_ops_n10(struct rnp_hw *hw) { - u8 i; struct rnp_thermal_sensor_data *data = &hw->thermal_sensor_data; + u8 i; for (i = 0; 
i < RNP_MAX_SENSORS; i++) { data->sensor[i].location = i + 1; @@ -4001,13 +4224,36 @@ static s32 rnp_phy_write_reg_hw_ops_n10(struct rnp_hw *hw, u32 reg_addr, static void rnp_set_vf_vlan_mode_hw_ops_n10(struct rnp_hw *hw, u16 vlan, int vf, bool enable) { - struct rnp_eth_info *eth = &hw->eth; struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; + struct rnp_eth_info *eth = &hw->eth; if (adapter->priv_flags & RNP_PRIV_FLAG_SRIOV_VLAN_MODE) eth->ops.set_vf_vlan_mode(eth, vlan, vf, enable); } +/** + * rnp_driver_status_hw_ops_n10 - notify status to firmware + * + * @hw: hw pointer + * @enable: state + * @mode: cmd + */ +static void rnp_driver_status_hw_ops_n10(struct rnp_hw *hw, bool enable, + int mode) +{ + switch (mode) { + case rnp_driver_insmod: + rnp_mbx_ifinsmod(hw, enable); + break; + case rnp_driver_suspuse: + rnp_mbx_ifsuspuse(hw, enable); + break; + case rnp_driver_force_control_mac: + rnp_mbx_ifforce_control_mac(hw, enable); + break; + } +} + static struct rnp_hw_operations hw_ops_n10 = { .init_hw = &rnp_init_hw_ops_n10, .reset_hw = &rnp_reset_hw_ops_n10, @@ -4061,6 +4307,7 @@ static struct rnp_hw_operations hw_ops_n10 = { .phy_read_reg = &rnp_phy_read_reg_hw_ops_n10, .phy_write_reg = &rnp_phy_write_reg_hw_ops_n10, .set_vf_vlan_mode = &rnp_set_vf_vlan_mode_hw_ops_n10, + .driver_status = &rnp_driver_status_hw_ops_n10, }; static void rnp_mac_set_rx_n10(struct rnp_mac_info *mac, bool status) @@ -4143,10 +4390,10 @@ static void rnp_mac_fcs_n10(struct rnp_mac_info *mac, bool status) **/ static s32 rnp_mac_fc_mode_n10(struct rnp_mac_info *mac) { + u32 rxctl_reg, txctl_reg[RNP_MAX_TRAFFIC_CLASS]; struct rnp_hw *hw = (struct rnp_hw *)mac->back; s32 ret_val = 0; u32 reg; - u32 rxctl_reg, txctl_reg[RNP_MAX_TRAFFIC_CLASS]; int i; /* Validate the water mark configuration for packet buffer 0. 
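A usage sketch for the new .driver_status hw op defined above; this is a fragment that assumes the driver's own rnp.h types, and the wrapper name is hypothetical. The link_down_on_close private-flag handler earlier in this patch calls it in exactly this way:

static void sketch_set_link_down_on_close(struct rnp_hw *hw, bool on)
{
        /* tell firmware whether the driver takes over MAC link control
         * when the interface is closed (rnp_mbx_ifforce_control_mac) */
        hw->ops.driver_status(hw, on, rnp_driver_force_control_mac);
}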
Zero @@ -4245,11 +4492,11 @@ static struct rnp_mac_operations mac_ops_n10 = { static s32 rnp_get_invariants_n10(struct rnp_hw *hw) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; struct rnp_mac_info *mac = &hw->mac; struct rnp_dma_info *dma = &hw->dma; struct rnp_eth_info *eth = &hw->eth; struct rnp_mbx_info *mbx = &hw->mbx; - struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; int i; /* setup dma info */ @@ -4400,11 +4647,11 @@ struct rnp_info rnp_n10_info = { static s32 rnp_get_invariants_n400(struct rnp_hw *hw) { + struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; struct rnp_mac_info *mac = &hw->mac; struct rnp_dma_info *dma = &hw->dma; struct rnp_eth_info *eth = &hw->eth; struct rnp_mbx_info *mbx = &hw->mbx; - struct rnp_adapter *adapter = (struct rnp_adapter *)hw->back; int i; /* setup dma info */ diff --git a/drivers/net/ethernet/mucse/rnp/rnp_param.c b/drivers/net/ethernet/mucse/rnp/rnp_param.c index c97898c9f525..483085c430d5 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_param.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_param.c @@ -177,10 +177,8 @@ static int rnp_validate_option(struct net_device *netdev, **/ void rnp_check_options(struct rnp_adapter *adapter) { - //unsigned int mdd; - int bd = adapter->bd_number; u32 *aflags = &adapter->flags; - //struct rnp_ring_feature *feature = adapter->ring_feature; + int bd = adapter->bd_number; if (bd >= RNP_MAX_NIC) { netdev_notice(adapter->netdev, diff --git a/drivers/net/ethernet/mucse/rnp/rnp_ptp.c b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c index d7fcea8d7914..84e8bfa09803 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_ptp.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_ptp.c @@ -164,7 +164,7 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, return 0; } -const struct rnp_hwtimestamp mac_ptp = { +static const struct rnp_hwtimestamp mac_ptp = { .config_hw_tstamping = config_hw_tstamping, .config_mac_irq_enable = config_mac_interrupt_enable, .init_systime = init_systime, @@ -198,10 +198,10 @@ static int rnp_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct rnp_adapter *pf = container_of(ptp, struct rnp_adapter, ptp_clock_ops); - unsigned long flags; - u32 sec, nsec; u32 quotient, reminder; + unsigned long flags; int neg_adj = 0; + u32 sec, nsec; if (delta < 0) { neg_adj = 1; @@ -274,9 +274,9 @@ int rnp_ptp_get_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) static int rnp_ptp_setup_ptp(struct rnp_adapter *pf, u32 value) { + struct timespec64 now; u32 sec_inc = 0; u64 temp = 0; - struct timespec64 now; /*For now just use extrnal clock(the kernel-system clock)*/ // value |= RNP_PTP_TCR_ESTI; @@ -340,13 +340,13 @@ static int rnp_ptp_setup_ptp(struct rnp_adapter *pf, u32 value) int rnp_ptp_set_ts_config(struct rnp_adapter *pf, struct ifreq *ifr) { struct hwtstamp_config config; - u32 ptp_v2 = 0; - u32 tstamp_all = 0; u32 ptp_over_ipv4_udp = 0; u32 ptp_over_ipv6_udp = 0; u32 ptp_over_ethernet = 0; u32 snap_type_sel = 0; u32 ts_master_en = 0; + u32 tstamp_all = 0; + u32 ptp_v2 = 0; u32 value = 0; s32 ret = -1; @@ -650,6 +650,7 @@ void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *adapter, union rnp_rx_desc *desc, struct sk_buff *skb) { u64 ns = 0; + __be32 value_h, value_l; u64 tsvalueh = 0, tsvaluel = 0; //static int test = 0; struct skb_shared_hwtstamps *hwtstamps = NULL; @@ -660,7 +661,7 @@ void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *adapter, return; } - if (likely(!(desc->wb.cmd & RNP_RXD_STAT_PTP))) + if (likely(!(desc->wb.cmd & cpu_to_le16(RNP_RXD_STAT_PTP)))) return; 
hwtstamps = skb_hwtstamps(skb); /* because of rx hwstamp store before the mac head @@ -672,13 +673,13 @@ void rnp_ptp_get_rx_hwstamp(struct rnp_adapter *adapter, * high32bit is seconds low32bits is nanoseconds */ skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE, - &tsvalueh, RNP_RX_SEC_SIZE); + &value_h, RNP_RX_SEC_SIZE); skb_copy_from_linear_data_offset(skb, RNP_RX_TIME_RESERVE + - RNP_RX_SEC_SIZE, &tsvaluel, + RNP_RX_SEC_SIZE, &value_l, RNP_RX_NANOSEC_SIZE); skb_pull(skb, RNP_RX_HWTS_OFFSET); - tsvalueh = ntohl(tsvalueh); - tsvaluel = ntohl(tsvaluel); + tsvalueh = ntohl(value_h); + tsvaluel = ntohl(value_l); ns = tsvaluel & RNP_RX_NSEC_MASK; ns += ((tsvalueh & RNP_RX_SEC_MASK) * 1000000000ULL); diff --git a/drivers/net/ethernet/mucse/rnp/rnp_regs.h b/drivers/net/ethernet/mucse/rnp/rnp_regs.h index 939fddb5903b..753aeb2613bd 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_regs.h +++ b/drivers/net/ethernet/mucse/rnp/rnp_regs.h @@ -23,6 +23,7 @@ #define RNP_DMA_VERSION (0x0000) #define RNP_DMA_CONFIG (0x0004) +#define RNP_DMA_AXI_READY (0x0014) #define DMA_MAC_LOOPBACK (1 << 0) #define DMA_SWITCH_LOOPBACK (1 << 1) #define DMA_VEB_BYPASS (1 << 4) diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sriov.c b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c index 746a55602880..e98cb238f2e4 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_sriov.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_sriov.c @@ -22,10 +22,10 @@ int rnp_msg_post_status_signle(struct rnp_adapter *adapter, #ifdef CONFIG_PCI_IOV static int __rnp_enable_sriov(struct rnp_adapter *adapter) { - struct rnp_hw *hw = &adapter->hw; int num_vf_macvlans, i, num_vebvlans; - struct vf_macvlans *mv_list; struct vf_vebvlans *vv_list = NULL; + struct rnp_hw *hw = &adapter->hw; + struct vf_macvlans *mv_list; /* sriov and dcb cannot open together */ /* reset numtc */ @@ -129,8 +129,8 @@ void rnp_enable_sriov_true(struct rnp_adapter *adapter) */ void rnp_enable_sriov(struct rnp_adapter *adapter) { - int pre_existing_vfs = 0; struct rnp_hw *hw = &adapter->hw; + int pre_existing_vfs = 0; pre_existing_vfs = pci_num_vf(adapter->pdev); if (!pre_existing_vfs && !adapter->num_vfs) @@ -179,10 +179,10 @@ void rnp_enable_sriov(struct rnp_adapter *adapter) static bool rnp_vfs_are_assigned(struct rnp_adapter *adapter) { - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *vfdev; unsigned int dev_id = RNP_DEV_ID_N10_PF0_VF_N; unsigned int vendor_id = PCI_VENDOR_ID_MUCSE; + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; switch (adapter->pdev->device) { case RNP_DEV_ID_N10_PF0: @@ -222,8 +222,8 @@ static bool rnp_vfs_are_assigned(struct rnp_adapter *adapter) int rnp_disable_sriov(struct rnp_adapter *adapter) { struct rnp_hw *hw = &adapter->hw; - int rss; int time = 0; + int rss; if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) return 0; @@ -317,20 +317,43 @@ static bool check_ari_mode(struct pci_dev *dev) return bus->self && bus->self->ari_enabled; } +static int wait_rx_change_down(struct rnp_adapter *adapter) +{ + struct rnp_ring *ring; + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + ring = adapter->rx_ring[i]; + + if ((ring->ring_flags & RNP_RING_FLAG_DO_RESET_RX_LEN) || + (ring->ring_flags & RNP_RING_FLAG_DELAY_SETUP_RX_LEN)) { + dev_err(&adapter->pdev->dev, + "we must wait rx depth set done\n"); + return -EOPNOTSUPP; + } + } + return 0; +} + static int rnp_pci_sriov_enable(struct pci_dev *dev, int num_vfs) { #ifdef CONFIG_PCI_IOV struct rnp_adapter *adapter = pci_get_drvdata(dev); + int pre_existing_vfs = pci_num_vf(dev); 
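The RX timestamp hunk above now copies the two prepended words into __be32 locals before ntohl(), which is what sparse expects; a stand-alone sketch of the decode into nanoseconds (masks are placeholders for RNP_RX_SEC_MASK/RNP_RX_NSEC_MASK):

#include <arpa/inet.h>
#include <stdint.h>

/* seconds word first, nanoseconds word second, both big-endian */
static uint64_t rx_tstamp_to_ns(uint32_t sec_be, uint32_t nsec_be,
                                uint32_t sec_mask, uint32_t nsec_mask)
{
        uint64_t sec = ntohl(sec_be) & sec_mask;
        uint64_t nsec = ntohl(nsec_be) & nsec_mask;

        return sec * 1000000000ULL + nsec;
}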
struct rnp_hw *hw = &adapter->hw; int err = 0; int i; - int pre_existing_vfs = pci_num_vf(dev); if (pre_existing_vfs && pre_existing_vfs != num_vfs) err = rnp_disable_sriov(adapter); else if (pre_existing_vfs && pre_existing_vfs == num_vfs) goto out; + if (wait_rx_change_down(adapter)) { + err = -EOPNOTSUPP; + goto err_out; + } + /* check vlan setup before sriov enable */ if (adapter->vlan_count > 1) { dev_err(&adapter->pdev->dev, @@ -402,10 +425,10 @@ static int rnp_pci_sriov_enable(struct pci_dev *dev, int num_vfs) static int rnp_pci_sriov_disable(struct pci_dev *dev) { struct rnp_adapter *adapter = pci_get_drvdata(dev); - int err; #ifdef CONFIG_PCI_IOV u32 current_flags = adapter->flags; #endif + int err; err = rnp_disable_sriov(adapter); @@ -426,8 +449,8 @@ static int rnp_set_vf_multicasts(struct rnp_adapter *adapter, u32 *msgbuf, { int entries = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; - u16 *hash_list = (u16 *)&msgbuf[1]; struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; + u16 *hash_list = (u16 *)&msgbuf[1]; struct rnp_hw *hw = &adapter->hw; int i; @@ -455,9 +478,9 @@ static int rnp_set_vf_multicasts(struct rnp_adapter *adapter, u32 *msgbuf, void rnp_restore_vf_macs(struct rnp_adapter *adapter) { struct rnp_hw *hw = &adapter->hw; - int vf; - u8 *mac_addr; int rar_entry; + u8 *mac_addr; + int vf; for (vf = 0; vf < adapter->num_vfs; vf++) { mac_addr = adapter->vfinfo[vf].vf_mac_addresses; @@ -475,8 +498,8 @@ void rnp_restore_vf_macs(struct rnp_adapter *adapter) void rnp_restore_vf_macvlans(struct rnp_adapter *adapter) { struct rnp_hw *hw = &adapter->hw; - struct list_head *pos; struct vf_macvlans *entry; + struct list_head *pos; list_for_each(pos, &adapter->vf_mvs.l) { entry = list_entry(pos, struct vf_macvlans, l); @@ -550,9 +573,10 @@ static int rnp_set_vf_vlan(struct rnp_adapter *adapter, int add, int vid, static inline void rnp_vf_reset_event(struct rnp_adapter *adapter, u32 vf) { struct rnp_hw *hw = &adapter->hw; - int rar_entry = hw->mac.num_rar_entries - (vf + 1); + int rar_entry; int i; + rar_entry = hw->mac.num_rar_entries - (vf + 1); /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; @@ -578,7 +602,9 @@ static int rnp_set_vf_mac(struct rnp_adapter *adapter, int vf, { struct rnp_hw *hw = &adapter->hw; /* this rar_entry may be cofict with mac vlan with pf */ - int rar_entry = hw->mac.num_rar_entries - (vf + 1); + int rar_entry; + + rar_entry = hw->mac.num_rar_entries - (vf + 1); memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6); @@ -596,8 +622,8 @@ static int rnp_set_vf_macvlan(struct rnp_adapter *adapter, int vf, int index, unsigned char *mac_addr) { struct rnp_hw *hw = &adapter->hw; - struct list_head *pos; struct vf_macvlans *entry; + struct list_head *pos; // index = 0 , only earase // index = 1 , earase and then set if (index <= 1) { @@ -656,11 +682,11 @@ static int rnp_set_vf_macvlan(struct rnp_adapter *adapter, int vf, int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { - unsigned char vf_mac_addr[6]; struct rnp_adapter *adapter = pci_get_drvdata(pdev); + bool enable = ((event_mask & 0x10000000U) != 0); unsigned int vfn = (event_mask & 0x3f); + unsigned char vf_mac_addr[6]; - bool enable = ((event_mask & 0x10000000U) != 0); if (enable) { eth_zero_addr(vf_mac_addr); @@ -677,9 +703,9 @@ int rnp_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) static int rnp_vf_reset_msg(struct rnp_adapter *adapter, u32 vf) { - struct rnp_hw *hw = &adapter->hw; unsigned char *vf_mac = 
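The hunk above wires wait_rx_change_down() into the SR-IOV enable path; a fragment (assuming the driver's types) showing the intended gate, with the function name reused from the patch:

static int sriov_enable_gate_sketch(struct rnp_adapter *adapter)
{
        /* refuse to bring up VFs while any RX ring still has a pending
         * descriptor-length change (DO_RESET_RX_LEN / DELAY_SETUP_RX_LEN) */
        if (wait_rx_change_down(adapter))
                return -EOPNOTSUPP;

        /* ... continue with the normal VF setup ... */
        return 0;
}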
adapter->vfinfo[vf].vf_mac_addresses; u32 msgbuf[RNP_VF_PERMADDR_MSG_LEN]; + struct rnp_hw *hw = &adapter->hw; u8 *addr = (u8 *)(&msgbuf[1]); /* reset the filters for the device */ @@ -739,6 +765,8 @@ static int rnp_vf_reset_msg(struct rnp_adapter *adapter, u32 vf) } msgbuf[RNP_VF_AXI_MHZ] = hw->usecstocount; + /* we start from 0 */ + msgbuf[RNP_VF_FEATURE] = 0; if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) msgbuf[RNP_VF_FEATURE] |= PF_FEATRURE_VLAN_FILTER; @@ -831,9 +859,9 @@ static int rnp_set_vf_vlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, static int rnp_set_vf_vlan_strip_msg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) { - struct rnp_hw *hw = &adapter->hw; int vlan_strip_on = !!(msgbuf[1] >> 31); int queue_cnt = msgbuf[1] & 0xffff; + struct rnp_hw *hw = &adapter->hw; int err = 0, i; vf_dbg("strip_on:%d queeu_cnt:%d, %d %d\n", vlan_strip_on, @@ -852,9 +880,9 @@ static int rnp_set_vf_vlan_strip_msg(struct rnp_adapter *adapter, static int rnp_set_vf_macvlan_msg(struct rnp_adapter *adapter, u32 *msgbuf, u32 vf) { - u8 *new_mac = ((u8 *)(&msgbuf[1])); int index = (msgbuf[0] & RNP_VT_MSGINFO_MASK) >> RNP_VT_MSGINFO_SHIFT; + u8 *new_mac = ((u8 *)(&msgbuf[1])); int err; if (adapter->vfinfo[vf].pf_set_mac && index > 0) { @@ -966,7 +994,12 @@ static int rnp_get_vf_queues(struct rnp_adapter *adapter, u32 *msgbuf, /* vf0 use ring4 */ /* vf1 use ring8 */ msgbuf[RNP_VF_QUEUE_START] = vf * 4 + 4; - + } else if ((hw->hw_type == rnp_hw_n10) && (hw->sriov_ring_limit == 1)) { + // some user only want 1 ring for each vf? + if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) + msgbuf[RNP_VF_QUEUE_START] = vf * 2 + 2; + else + msgbuf[RNP_VF_QUEUE_START] = vf * 2; } else { if (hw->feature_flags & RNP_NET_FEATURE_VF_FIXED) { msgbuf[RNP_VF_QUEUE_START] = @@ -986,8 +1019,8 @@ static int rnp_get_vf_queues(struct rnp_adapter *adapter, u32 *msgbuf, static int rnp_rcv_msg_from_vf(struct rnp_adapter *adapter, u32 vf) { u32 mbx_size = RNP_VFMAILBOX_SIZE; - u32 msgbuf[RNP_VFMAILBOX_SIZE]; struct rnp_hw *hw = &adapter->hw; + u32 msgbuf[RNP_VFMAILBOX_SIZE]; s32 retval; retval = rnp_read_mbx(hw, msgbuf, mbx_size, vf); @@ -1137,9 +1170,9 @@ void rnp_msg_task(struct rnp_adapter *adapter) static int rnp_msg_post_status_signle_link(struct rnp_adapter *adapter, int vf, int link_state) { - u32 msgbuf[RNP_VFMAILBOX_SIZE]; struct rnp_hw *hw = &adapter->hw; struct rnp_mbx_info *mbx = &hw->mbx; + u32 msgbuf[RNP_VFMAILBOX_SIZE]; msgbuf[0] = RNP_PF_SET_LINK | (vf << RNP_VNUM_OFFSET); @@ -1164,9 +1197,9 @@ static int rnp_msg_post_status_signle_link(struct rnp_adapter *adapter, int vf, int rnp_msg_post_status_signle(struct rnp_adapter *adapter, enum PF_STATUS status, int vf) { - u32 msgbuf[RNP_VFMAILBOX_SIZE]; struct rnp_hw *hw = &adapter->hw; struct rnp_mbx_info *mbx = &hw->mbx; + u32 msgbuf[RNP_VFMAILBOX_SIZE]; switch (status) { case PF_FCS_STATUS: @@ -1230,8 +1263,8 @@ int rnp_msg_post_status_signle(struct rnp_adapter *adapter, /* try to send mailbox to all active vf */ int rnp_msg_post_status(struct rnp_adapter *adapter, enum PF_STATUS status) { - u32 vf; int err = 0; + u32 vf; if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) return err; @@ -1279,9 +1312,9 @@ int rnp_get_vf_ringnum(struct rnp_hw *hw, int vf, int num) int rnp_setup_ring_maxrate(struct rnp_adapter *adapter, int ring, u64 max_rate) { + int samples_1sec = adapter->hw.usecstocount * 1000000; struct rnp_hw *hw = &adapter->hw; struct rnp_dma_info *dma = &hw->dma; - int samples_1sec = adapter->hw.usecstocount * 1000000; dma_ring_wr32(dma, 
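A worked example for the new single-ring-per-VF branch in rnp_get_vf_queues above (hw->sriov_ring_limit == 1): with the VF_FIXED feature the first queue pair stays with the PF, so VF0 starts at queue 2, VF1 at queue 4; without it VF0 starts at queue 0. Pure arithmetic, no driver state:

static int vf_queue_start_sketch(int vf, int vf_fixed)
{
        return vf_fixed ? vf * 2 + 2 : vf * 2;
}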
RING_OFFSET(ring) + RNP_DMA_REG_TX_FLOW_CTRL_TM, samples_1sec); @@ -1359,8 +1392,8 @@ static int rnp_enable_port_vlan(struct rnp_adapter *adapter, int vf, int rnp_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { - int err = 0; struct rnp_adapter *adapter = netdev_priv(netdev); + int err = 0; /* VLAN IDs accepted range 0-4094 */ if (vf < 0 || vf >= adapter->num_vfs || vlan > VLAN_VID_MASK - 1 || @@ -1490,11 +1523,11 @@ int rnp_ndo_set_vf_bw(struct net_device *netdev, int vf, { struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int link_speed = 0; + u64 real_rate = 0; /* limit vf ring rate */ int ring_max_rate; int vf_ring; - int link_speed = 0; - u64 real_rate = 0; if (vf >= hw->max_vfs - 1) return -EINVAL; @@ -1577,7 +1610,21 @@ int rnp_ndo_get_vf_config(struct net_device *netdev, int vf, ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; - ivi->linkstate = adapter->vfinfo[vf].link_state; + + switch (adapter->vfinfo[vf].link_state) { + case rnp_link_state_on: + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + break; + case rnp_link_state_off: + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + break; + case rnp_link_state_auto: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + break; + default: + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + } + ivi->trusted = adapter->vfinfo[vf].trusted; return 0; diff --git a/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c index 91e277cc7171..516d2d970a24 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c +++ b/drivers/net/ethernet/mucse/rnp/rnp_sysfs.c @@ -126,8 +126,8 @@ rnp_hwmon_show_maxopthresh(struct device __always_unused *dev, static int rnp_add_hwmon_attr(struct rnp_adapter *adapter, unsigned int offset, int type) { - unsigned int n_attr; struct hwmon_attr *rnp_attr; + unsigned int n_attr; n_attr = adapter->rnp_hwmon_buff->n_hwmon; rnp_attr = &adapter->rnp_hwmon_buff->hwmon_list[n_attr]; @@ -208,13 +208,13 @@ static ssize_t maintain_write(struct file *filp, struct kobject *kobj, loff_t off, size_t count) { struct device *dev = kobj_to_dev(kobj); - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; struct maintain_req *req; void *dma_buf = NULL; dma_addr_t dma_phy; + int err = -EINVAL; int bytes; if (off == 0) { @@ -298,16 +298,71 @@ static ssize_t maintain_write(struct file *filp, struct kobject *kobj, static BIN_ATTR(maintain, 0644, maintain_read, maintain_write, 1 * 1024 * 1024); +static ssize_t version_info_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "driver :%s-%x\n", + rnp_driver_version, hw->pcode); + ret += sprintf(buf + ret, "fw :%d.%d.%d.%d 0x%08x", + ((char *)&hw->fw_version)[3], + ((char *)&hw->fw_version)[2], + ((char *)&hw->fw_version)[1], + ((char *)&hw->fw_version)[0], hw->bd_uid); + + return ret; +} + +static ssize_t ring_sriov_info_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = 0; + + ret += sprintf(buf + ret, "now sriov ring num is %d\n", hw->sriov_ring_limit); + + return ret; +} + +static 
ssize_t ring_sriov_info_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = to_net_device(dev); + struct rnp_adapter *adapter = netdev_priv(netdev); + struct rnp_hw *hw = &adapter->hw; + int ret = count; + u32 sriov_ring_num; + + if (kstrtou32(buf, 0, &sriov_ring_num) != 0) + return -EINVAL; + /* should check tx_ring_num is valid */ + if (sriov_ring_num < 2) + hw->sriov_ring_limit = sriov_ring_num; + else + ret = -EINVAL; + + return ret; +} + static ssize_t rx_desc_info_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); u32 rx_ring_num = adapter->sysfs_rx_ring_num; - u32 rx_desc_num = adapter->sysfs_rx_desc_num; struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; - int ret = 0; + u32 rx_desc_num = adapter->sysfs_rx_desc_num; union rnp_rx_desc *desc; + int ret = 0; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -327,10 +382,10 @@ static ssize_t rx_desc_info_store(struct device *dev, { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); - int ret = count; u32 rx_desc_num = adapter->sysfs_rx_desc_num; u32 rx_ring_num = adapter->sysfs_rx_ring_num; struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; + int ret = count; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -375,8 +430,8 @@ static ssize_t tcp_sync_info_store(struct device *dev, struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - int ret = count; u32 tcp_sync_queue; + int ret = count; if (kstrtou32(buf, 0, &tcp_sync_queue) != 0) return -EINVAL; @@ -427,8 +482,8 @@ static ssize_t rx_skip_info_store(struct device *dev, struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - int ret = count; u32 rx_skip_count; + int ret = count; if (kstrtou32(buf, 0, &rx_skip_count) != 0) return -EINVAL; @@ -467,8 +522,8 @@ static ssize_t rx_drop_info_store(struct device *dev, struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - int ret = count; u64 rx_drop_status; + int ret = count; if (kstrtou64(buf, 0, &rx_drop_status) != 0) return -EINVAL; @@ -520,8 +575,8 @@ static ssize_t outer_vlan_info_store(struct device *dev, struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - int ret = count; u32 outer_vlan_type; + int ret = count; if (kstrtou32(buf, 0, &outer_vlan_type) != 0) return -EINVAL; @@ -559,7 +614,6 @@ static ssize_t tx_stags_info_store(struct device *dev, struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - struct rnp_eth_info *eth = &hw->eth; int ret = count; u16 tx_stags; @@ -582,10 +636,10 @@ static ssize_t tx_desc_info_show(struct device *dev, struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); u32 tx_ring_num = adapter->sysfs_tx_ring_num; - u32 tx_desc_num = adapter->sysfs_tx_desc_num; struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; - int ret = 0; + u32 tx_desc_num = adapter->sysfs_tx_desc_num; struct rnp_tx_desc *desc; + int ret = 0; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -606,10 +660,10 @@ static ssize_t 
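A user-space stand-in for the parse-and-validate pattern in the new sysfs store handlers above (kstrtou32() is stricter than sscanf about trailing characters; ring_sriov_info_store additionally accepts only values below 2):

#include <errno.h>
#include <stdio.h>

static int parse_sriov_ring_limit(const char *buf, unsigned int *out)
{
        unsigned int val;

        if (sscanf(buf, "%u", &val) != 1)
                return -EINVAL;
        if (val >= 2)   /* only 0 or 1 rings per VF accepted */
                return -EINVAL;
        *out = val;
        return 0;
}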
tx_desc_info_store(struct device *dev, { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); - int ret = count; u32 tx_desc_num = adapter->sysfs_tx_desc_num; u32 tx_ring_num = adapter->sysfs_tx_ring_num; struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; + int ret = count; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -627,12 +681,12 @@ static ssize_t tx_desc_info_store(struct device *dev, static ssize_t para_info_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; struct rnp_eth_info *eth = &hw->eth; struct rnp_mac_info *mac = &hw->mac; + int ret = 0; ret += sprintf(buf + ret, "nsi_en:%d\n", hw->ncsi_en); ret += sprintf(buf + ret, @@ -683,8 +737,8 @@ static ssize_t rx_ring_info_show(struct device *dev, struct rnp_adapter *adapter = netdev_priv(netdev); u32 rx_ring_num = adapter->sysfs_rx_ring_num; struct rnp_ring *ring = adapter->rx_ring[rx_ring_num]; - int ret = 0; union rnp_rx_desc *rx_desc; + int ret = 0; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -707,8 +761,8 @@ static ssize_t rx_ring_info_store(struct device *dev, { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); - int ret = count; u32 rx_ring_num = adapter->sysfs_rx_ring_num; + int ret = count; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -730,9 +784,9 @@ static ssize_t tx_ring_info_show(struct device *dev, struct rnp_adapter *adapter = netdev_priv(netdev); u32 tx_ring_num = adapter->sysfs_tx_ring_num; struct rnp_ring *ring = adapter->tx_ring[tx_ring_num]; - int ret = 0; struct rnp_tx_buffer *tx_buffer; struct rnp_tx_desc *eop_desc; + int ret = 0; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -762,8 +816,8 @@ static ssize_t tx_ring_info_store(struct device *dev, { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); - int ret = count; u32 tx_ring_num = adapter->sysfs_tx_ring_num; + int ret = count; if (test_bit(__RNP_DOWN, &adapter->state)) return ret; @@ -778,347 +832,16 @@ static ssize_t tx_ring_info_store(struct device *dev, return ret; } -static ssize_t tx_counter_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u32 val = 0; - int ret = 0; - struct net_device *netdev = to_net_device(dev); - struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; - - ret += sprintf(buf + ret, "tx counters\n"); - ret += sprintf(buf + ret, "ring0-tx:\n"); - - val = rd32(hw, RNP_DMA_REG_TX_DESC_BUF_LEN); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "len:", RNP_DMA_REG_TX_DESC_BUF_LEN, val); - - val = rd32(hw, RNP_DMA_REG_TX_DESC_BUF_HEAD); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "head:", RNP_DMA_REG_TX_DESC_BUF_HEAD, val); - - val = rd32(hw, RNP_DMA_REG_TX_DESC_BUF_TAIL); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "tail:", RNP_DMA_REG_TX_DESC_BUF_TAIL, val); - - ret += sprintf(buf + ret, "to_1to4_p1:\n"); - - val = rd32(hw, RNP_ETH_1TO4_INST0_IN_PKTS); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "emac_in:", RNP_ETH_1TO4_INST0_IN_PKTS, val); - - val = rd32(hw, RNP_ETH_IN_0_TX_PKT_NUM(0)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "emac_send:", RNP_ETH_IN_0_TX_PKT_NUM(0), val); - - ret += sprintf(buf + ret, "to_1to4_p2:\n"); - - val = rd32(hw, RNP_ETH_IN_1_TX_PKT_NUM(0)); - ret += 
sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "sop_pkt:", RNP_ETH_IN_1_TX_PKT_NUM(0), val); - - val = rd32(hw, RNP_ETH_IN_2_TX_PKT_NUM(0)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "eop_pkt:", RNP_ETH_IN_2_TX_PKT_NUM(0), val); - - val = rd32(hw, RNP_ETH_IN_3_TX_PKT_NUM(0)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "send_terr:", RNP_ETH_IN_3_TX_PKT_NUM(0), val); - - ret += sprintf(buf + ret, "to_tx_trans(phy):\n"); - - val = rd32(hw, RNP_ETH_EMAC_TX_TO_PHY_PKTS(0)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "in:", RNP_ETH_EMAC_TX_TO_PHY_PKTS(0), val); - - val = rd32(hw, RNP_ETH_TXTRANS_PTP_PKT_NUM(0)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "out:", RNP_ETH_TXTRANS_PTP_PKT_NUM(0), val); - - ret += sprintf(buf + ret, "mac:\n"); - - val = rd32(hw, 0x1081c); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", "tx:", 0x1081c, - val); - - val = rd32(hw, 0x1087c); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "underflow_err:", 0x1087c, val); - - val = rd32(hw, RNP_ETH_TX_DEBUG(0)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "port0_txtrans_sop:", RNP_ETH_TX_DEBUG(0), val); - - val = rd32(hw, RNP_ETH_TX_DEBUG(4)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "port0_txtrans_eop:", RNP_ETH_TX_DEBUG(4), val); - - val = rd32(hw, RNP_ETH_TX_DEBUG(13)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "tx_empty:", RNP_ETH_TX_DEBUG(13), val); - - val = rd32(hw, RNP_ETH_TX_DEBUG(14)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", - "tx_prog_full:", RNP_ETH_TX_DEBUG(14), val); - - val = rd32(hw, RNP_ETH_TX_DEBUG(15)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", - "tx_full:", RNP_ETH_TX_DEBUG(15), val); - - return ret; -} - -static DEVICE_ATTR_RO(tx_counter); - -static ssize_t rx_counter_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - u32 val = 0, port = 0; - int ret = 0; - struct net_device *netdev = to_net_device(dev); - struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; - - ret += sprintf(buf + ret, "rx counters\n"); - for (port = 0; port < 4; port++) { - ret += sprintf(buf + ret, "emac_rx_trans (port:%d):\n", - port); - - val = rd32(hw, RNP_RXTRANS_RX_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "pkts:", RNP_RXTRANS_RX_PKTS(port), val); - - val = rd32(hw, RNP_RXTRANS_DROP_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "drop:", RNP_RXTRANS_DROP_PKTS(port), val); - - val = rd32(hw, RNP_RXTRANS_WDT_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "wdt_err:", RNP_RXTRANS_WDT_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_CODE_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "code_err:", RNP_RXTRANS_CODE_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_CRC_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "crc_err:", RNP_RXTRANS_CRC_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_SLEN_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "slen_err:", RNP_RXTRANS_SLEN_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_GLEN_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "glen_err:", RNP_RXTRANS_GLEN_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_IPH_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "iph_err:", RNP_RXTRANS_IPH_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_CSUM_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", 
- "csum_err:", RNP_RXTRANS_CSUM_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_LEN_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "len_err:", RNP_RXTRANS_LEN_ERR_PKTS(port), - val); - - val = rd32(hw, RNP_RXTRANS_CUT_ERR_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "trans_cut_err:", - RNP_RXTRANS_CUT_ERR_PKTS(port), val); - - val = rd32(hw, RNP_RXTRANS_EXCEPT_BYTES(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "expt_byte_err:", - RNP_RXTRANS_EXCEPT_BYTES(port), val); - - val = rd32(hw, RNP_RXTRANS_G1600_BYTES_PKTS(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - ">1600Byte:", - RNP_RXTRANS_G1600_BYTES_PKTS(port), val); - } - - ret += sprintf(buf + ret, "gather:\n"); - val = rd32(hw, RNP_ETH_TOTAL_GAT_RX_PKT_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "total_in_pkts:", RNP_ETH_TOTAL_GAT_RX_PKT_NUM, - val); - - port = 0; - val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "to_nxt_mdodule:", RNP_ETH_RX_PKT_NUM(port), val); - - for (port = 0; port < 4; port++) { - u8 pname[16] = { 0 }; - - val = rd32(hw, RNP_ETH_RX_PKT_NUM(port)); - sprintf(pname, "p%d-rx:", port); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, - RNP_ETH_RX_PKT_NUM(port), val); - } - - for (port = 0; port < 4; port++) { - u8 pname[16] = { 0 }; - - val = rd32(hw, RNP_ETH_RX_DROP_PKT_NUM(port)); - sprintf(pname, "p%d-drop:", port); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", pname, - RNP_ETH_RX_DROP_PKT_NUM(port), val); - } - - ret += sprintf(buf + ret, "ip-parse:\n"); - - val = rd32(hw, RNP_ETH_PKT_EGRESS_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "pkg_egree:", RNP_ETH_PKT_EGRESS_NUM, val); - - val = rd32(hw, RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "L3_len_err:", RNP_ETH_PKT_IP_HDR_LEN_ERR_NUM, val); - - val = rd32(hw, RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "ip_hdr_err:", RNP_ETH_PKT_IP_PKT_LEN_ERR_NUM, val); - - val = rd32(hw, RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "l3-csum-err:", RNP_ETH_PKT_L3_HDR_CHK_ERR_NUM, - val); - - val = rd32(hw, RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "l4-csum-err:", RNP_ETH_PKT_L4_HDR_CHK_ERR_NUM, - val); - - val = rd32(hw, RNP_ETH_PKT_SCTP_CHK_ERR_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "sctp-err:", RNP_ETH_PKT_SCTP_CHK_ERR_NUM, val); - - val = rd32(hw, RNP_ETH_PKT_VLAN_ERR_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "vlan-err:", RNP_ETH_PKT_VLAN_ERR_NUM, val); - - val = rd32(hw, RNP_ETH_PKT_EXCEPT_SHORT_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "except_short_num:", RNP_ETH_PKT_EXCEPT_SHORT_NUM, - val); - - val = rd32(hw, RNP_ETH_PKT_PTP_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "ptp:", RNP_ETH_PKT_PTP_NUM, val); - - ret += sprintf(buf + ret, "to-indecap:\n"); - - val = rd32(hw, RNP_ETH_DECAP_PKT_IN_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "*in engin*:", RNP_ETH_DECAP_PKT_IN_NUM, val); - - val = rd32(hw, RNP_ETH_DECAP_PKT_OUT_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "*out engin*:", RNP_ETH_DECAP_PKT_OUT_NUM, val); - - val = rd32(hw, RNP_ETH_DECAP_DMAC_OUT_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "to-dma/host:", RNP_ETH_DECAP_DMAC_OUT_NUM, val); - - val = rd32(hw, RNP_ETH_DECAP_BMC_OUT_NUM); - ret += 
sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "to-bmc:", RNP_ETH_DECAP_BMC_OUT_NUM, val); - - val = rd32(hw, RNP_ETH_DECAP_SW_OUT_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "to-switch:", RNP_ETH_DECAP_SW_OUT_NUM, val); - - val = rd32(hw, RNP_ETH_DECAP_MIRROR_OUT_NUM); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "bmc+host:", RNP_ETH_DECAP_MIRROR_OUT_NUM, val); - - val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(0x0)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "err_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(0x0), val); - - val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(1)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "plicy_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(1), val); - - val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(2)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "dmac_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(2), val); - - val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(3)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "bmc_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(3), val); - - val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(4)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "sw_drop:", RNP_ETH_DECAP_PKT_DROP_NUM(4), val); - - val = rd32(hw, RNP_ETH_DECAP_PKT_DROP_NUM(5)); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "rm_vlane_num:", RNP_ETH_DECAP_PKT_DROP_NUM(5), - val); - - ret += sprintf(buf + ret, "dma-2-host:\n"); - - val = rd32(hw, 0x264); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "fifo equ:", 0x264, val); - - val = rd32(hw, 0x268); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "fifo deq:", 0x268, val); - - val = rd32(hw, 0x114); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "unexpt_abtring:", 0x114, val); - - val = rd32(hw, 0x288); - ret += sprintf(buf + ret, "\t %16s 0x%08x: %d\n", - "pci2host:", 0x288, val); - - for (port = 0; port < 4; port++) { - ret += sprintf(buf + ret, "rx-ring%d:\n", port); - - val = rd32(hw, RNP_DMA_REG_RX_DESC_BUF_HEAD); - ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", - "head:", RNP_DMA_REG_RX_DESC_BUF_HEAD, val); - - val = rd32(hw, RNP_DMA_REG_RX_DESC_BUF_TAIL); - ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", - "tail:", RNP_DMA_REG_RX_DESC_BUF_TAIL, val); - - val = rd32(hw, RNP_DMA_REG_RX_DESC_BUF_LEN); - ret += sprintf(buf + ret, "\t %16s 0x%08x: 0x%x\n", - "len:", RNP_DMA_REG_RX_DESC_BUF_LEN, val); - } - - return ret; -} - -static DEVICE_ATTR_RO(rx_counter); - static ssize_t active_vid_show(struct device *dev, struct device_attribute *attr, char *buf) { - u16 vid; - u16 current_vid = 0; - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; u8 vfnum = hw->max_vfs - 1; + u16 current_vid = 0; + int ret = 0; + u16 vid; if ((adapter->flags & RNP_FLAG_SRIOV_ENABLED)) { current_vid = rd32(hw, @@ -1139,13 +862,13 @@ static ssize_t active_vid_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - u16 vid; - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; u8 vfnum = hw->max_vfs - 1; + int err = -EINVAL; int port = 0; + u16 vid; if (!(adapter->flags & RNP_FLAG_SRIOV_ENABLED)) return -EIO; @@ -1185,13 +908,13 @@ static inline int pn_sn_dlen(char *v, int v_len) static int rnp_mbx_get_pn_sn(struct rnp_hw *hw, char pn[33], char sn[33]) { struct maintain_req *req; + struct ucfg_mac_sn *cfg; void *dma_buf = NULL; dma_addr_t dma_phy; - struct ucfg_mac_sn *cfg; - 
int err = 0, - bytes = sizeof(*req) + sizeof(struct ucfg_mac_sn); + int err = 0, bytes; + bytes = sizeof(*req) + sizeof(struct ucfg_mac_sn); memset(pn, 0, 33); memset(sn, 0, 33); @@ -1239,11 +962,11 @@ static int rnp_mbx_get_pn_sn(struct rnp_hw *hw, char pn[33], char sn[33]) static ssize_t own_vpd_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; char pn[33] = { 0 }, sn[33] = { 0 }; + struct rnp_hw *hw = &adapter->hw; + int ret = 0; rnp_mbx_get_pn_sn(hw, pn, sn); @@ -1259,9 +982,9 @@ static DEVICE_ATTR_RO(own_vpd); static ssize_t port_idx_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; ret += sprintf(buf, "%d\n", adapter->portid_of_card); @@ -1273,10 +996,10 @@ static ssize_t debug_linkstat_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0; ret += sprintf(buf, "%d %d dumy:0x%x up-flag:%d carry:%d\n", adapter->link_up, adapter->hw.link, rd32(hw, 0xc), @@ -1290,10 +1013,10 @@ static DEVICE_ATTR_RO(debug_linkstat); static ssize_t sfp_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0; if (rnp_mbx_get_lane_stat(hw) != 0) { ret += sprintf(buf, " IO Error\n"); @@ -1311,11 +1034,11 @@ static DEVICE_ATTR_RO(sfp); static ssize_t pci_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; int gen = 3, lanes = 8; + int err = -EINVAL; if (count > 30) return -EINVAL; @@ -1333,10 +1056,10 @@ static ssize_t pci_store(struct device *dev, struct device_attribute *attr, static ssize_t pci_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0; if (rnp_mbx_get_lane_stat(hw) != 0) { ret += sprintf(buf, " IO Error\n"); @@ -1353,10 +1076,10 @@ static ssize_t sfp_tx_disable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int err = -EINVAL; long enable = 0; if (kstrtol(buf, 10, &enable)) @@ -1372,10 +1095,10 @@ static ssize_t sfp_tx_disable_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0; if (rnp_mbx_get_lane_stat(hw) != 0) ret += sprintf(buf, " IO Error\n"); @@ -1390,10 +1113,10 @@ static ssize_t link_traing_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); 
struct rnp_hw *hw = &adapter->hw; + int err = -EINVAL; long enable = 0; if (kstrtol(buf, 10, &enable)) @@ -1408,10 +1131,10 @@ static ssize_t link_traing_store(struct device *dev, static ssize_t link_traing_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0; if (rnp_mbx_get_lane_stat(hw) != 0) ret += sprintf(buf, " IO Error\n"); @@ -1425,10 +1148,10 @@ static DEVICE_ATTR_RW(link_traing); static ssize_t fec_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int err = -EINVAL; long enable = 0; if (kstrtol(buf, 10, &enable)) @@ -1442,10 +1165,10 @@ static ssize_t fec_store(struct device *dev, struct device_attribute *attr, static ssize_t fec_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0; if (rnp_mbx_get_lane_stat(hw) != 0) ret += sprintf(buf, " IO Error\n"); @@ -1461,15 +1184,15 @@ static DEVICE_ATTR_RW(fec); static ssize_t pcs_reg_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - u32 reg_hi = 0, reg_lo = 0, pcs_base_regs = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); + u32 reg_hi = 0, reg_lo = 0, pcs_base_regs = 0; struct rnp_hw *hw = &adapter->hw; - int input_arg_cnt; u32 pcs_phy_regs[] = { 0x00040000, 0x00041000, 0x00042000, 0x00043000, 0x00040000, 0x00041000, 0x00042000, 0x00043000, }; + int input_arg_cnt; if (count > 64) { e_err(drv, "Error: Input size >100: too large\n"); @@ -1522,9 +1245,9 @@ static ssize_t pcs_reg_store(struct device *dev, struct device_attribute *attr, static ssize_t pcs_reg_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); + int ret = 0; switch (adapter->sysfs_input_arg_cnt) { case 2: @@ -1554,10 +1277,10 @@ static ssize_t phy_reg_show(struct device *dev, { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); + int phy_reg = adapter->sysfs_phy_reg; struct rnp_hw *hw = &adapter->hw; - int val = 0; int err = -EINVAL; - int phy_reg = adapter->sysfs_phy_reg; + int val = 0; if (hw) { if (adapter->sysfs_is_phy_ext_reg) { @@ -1584,13 +1307,13 @@ static ssize_t phy_reg_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int i = 0, argc = 0, err = -EINVAL; - char argv[3][16]; - unsigned long val[3] = { 0 }; - int phy_reg = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); + int i = 0, argc = 0, err = -EINVAL; struct rnp_hw *hw = &adapter->hw; + unsigned long val[3] = { 0 }; + char argv[3][16]; + int phy_reg = 0; memset(argv, 0, sizeof(argv)); argc = sscanf(buf, "%15s %15s %15s", argv[0], argv[1], argv[2]); @@ -1655,10 +1378,10 @@ static ssize_t prbs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct 
rnp_hw *hw = &adapter->hw; + int err = -EINVAL; long prbs = 0; if (kstrtol(buf, 10, &prbs)) @@ -1673,10 +1396,10 @@ static ssize_t autoneg_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = -EINVAL; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int err = -EINVAL; long enable = 0; if (kstrtol(buf, 10, &enable)) @@ -1689,10 +1412,10 @@ static ssize_t autoneg_store(struct device *dev, static ssize_t autoneg_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0; if (rnp_mbx_get_lane_stat(hw) != 0) ret += sprintf(buf, " IO Error\n"); @@ -1707,11 +1430,11 @@ static ssize_t si_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int err = -EINVAL; + int si_main = -1, si_pre = -1, si_post = -1, si_txboost = -1; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; - int si_main = -1, si_pre = -1, si_post = -1, si_txboost = -1; + int err = -EINVAL; int cnt; if (rnp_mbx_get_lane_stat(hw) != 0) { @@ -1790,10 +1513,10 @@ static ssize_t si_store(struct device *dev, static ssize_t si_show(struct device *dev, struct device_attribute *attr, char *buf) { - int ret = 0, i; struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); struct rnp_hw *hw = &adapter->hw; + int ret = 0, i; if (rnp_mbx_get_lane_stat(hw) != 0) { ret += sprintf(buf, " IO Error\n"); @@ -1835,8 +1558,8 @@ static ssize_t temperature_show(struct device *dev, { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); - struct rnp_hw *hw = &adapter->hw; int ret = 0, temp = 0, voltage = 0; + struct rnp_hw *hw = &adapter->hw; temp = rnp_mbx_get_temp(hw, &voltage); @@ -1864,8 +1587,8 @@ static ssize_t root_slot_info_show(struct device *dev, { struct net_device *netdev = to_net_device(dev); struct rnp_adapter *adapter = netdev_priv(netdev); - int ret = 0; struct pci_dev *root_pdev = pcie_find_root_port_old(adapter->pdev); + int ret = 0; if (root_pdev) { ret += sprintf(buf + ret, "%02x:%02x.%x\n", @@ -1880,8 +1603,8 @@ static ssize_t root_slot_info_show(struct device *dev, static int do_switch_loopback_set(struct rnp_adapter *adapter, int en, int sport_lane, int dport_lane) { - int v; struct rnp_hw *hw = &adapter->hw; + int v; pr_info("%s: %s %d -> %d en:%d\n", __func__, netdev_name(adapter->netdev), sport_lane, dport_lane, en); @@ -1920,8 +1643,8 @@ static int do_switch_loopback_set(struct rnp_adapter *adapter, int en, static ssize_t _switch_loopback(struct rnp_adapter *adapter, const char *peer_eth, int en) { - struct net_device *peer_netdev = NULL; struct rnp_adapter *peer_adapter = NULL; + struct net_device *peer_netdev = NULL; char name[100]; strscpy(name, peer_eth, sizeof(name)); @@ -1987,15 +1710,20 @@ static DEVICE_ATTR_RO(para_info); static DEVICE_ATTR_RW(tx_desc_info); static DEVICE_ATTR_RW(rx_desc_info); static DEVICE_ATTR_RW(rx_drop_info); +static DEVICE_ATTR_RW(ring_sriov_info); static DEVICE_ATTR_RW(outer_vlan_info); static DEVICE_ATTR_RW(tcp_sync_info); static DEVICE_ATTR_RW(rx_skip_info); static DEVICE_ATTR_RW(tx_stags_info); +static DEVICE_ATTR_RO(version_info); + static struct attribute *dev_attrs[] = { 
&dev_attr_tx_stags_info.attr, &dev_attr_root_slot_info.attr, &dev_attr_active_vid.attr, + &dev_attr_version_info.attr, &dev_attr_rx_drop_info.attr, + &dev_attr_ring_sriov_info.attr, &dev_attr_outer_vlan_info.attr, &dev_attr_tcp_sync_info.attr, &dev_attr_para_info.attr, @@ -2019,8 +1747,6 @@ static struct attribute *vendor_dev_attrs[] = { &dev_attr_phy_reg.attr, &dev_attr_tx_desc_info.attr, &dev_attr_rx_desc_info.attr, - &dev_attr_tx_counter.attr, - &dev_attr_rx_counter.attr, &dev_attr_tx_ring_info.attr, &dev_attr_rx_ring_info.attr, &dev_attr_rx_skip_info.attr, @@ -2065,11 +1791,11 @@ void rnp_sysfs_exit(struct rnp_adapter *adapter) /* called from rnp_main.c */ int rnp_sysfs_init(struct rnp_adapter *adapter) { - int rc = 0; - int flag; struct hwmon_buff *rnp_hwmon; struct device *hwmon_dev; unsigned int i; + int rc = 0; + int flag; flag = sysfs_create_groups(&adapter->netdev->dev.kobj, &attr_grps[0]); diff --git a/drivers/net/ethernet/mucse/rnp/rnp_type.h b/drivers/net/ethernet/mucse/rnp/rnp_type.h index 0cf9806075ef..a9f3d7c5e7eb 100644 --- a/drivers/net/ethernet/mucse/rnp/rnp_type.h +++ b/drivers/net/ethernet/mucse/rnp/rnp_type.h @@ -736,6 +736,7 @@ struct rnp_eth_operations { enum { rnp_driver_insmod, rnp_driver_suspuse, + rnp_driver_force_control_mac, }; struct rnp_hw_operations { @@ -1103,7 +1104,7 @@ struct rnp_hw { int nr_lane; u8 is_backplane : 1; u8 is_sgmii : 1; - u8 force_10g_1g_speed_ablity : 1; + u8 force_10g_1g_speed_ability : 1; u8 force_speed_stat : 2; #define FORCE_SPEED_STAT_DISABLED 0 #define FORCE_SPEED_STAT_1G 1 @@ -1115,6 +1116,7 @@ struct rnp_hw { u16 ncsi_mc_count; u16 ncsi_vlan_count; u32 ncsi_vf_cpu_shm_pf_base; + u32 saved_force_link_speed; u32 pcode; u32 supported_link; u32 advertised_link; @@ -1122,7 +1124,7 @@ struct rnp_hw { u32 tp_mdx; u32 tp_mdix_ctrl; u32 phy_id; - u8 fw_lldp_ablity; + u8 fw_lldp_ability; u8 link; u8 pci_gen; u8 pci_lanes; @@ -1132,6 +1134,8 @@ struct rnp_hw { u32 dma_version; u32 wol; u32 eco; + u32 force_status; + u32 force_link_supported; u16 min_length; u16 max_length; u16 min_length_current; diff --git a/drivers/net/ethernet/mucse/rnp/version.h b/drivers/net/ethernet/mucse/rnp/version.h index a8020b4fdf30..8a22ddf9e50f 100644 --- a/drivers/net/ethernet/mucse/rnp/version.h +++ b/drivers/net/ethernet/mucse/rnp/version.h @@ -3,5 +3,5 @@ #ifndef VERSION_H #define VERSION_H -#define GIT_COMMIT " 8206c05" +#define GIT_COMMIT " 4650a79" #endif -- Gitee