From 08f4280700700faaa2393cbf9a402d4f51d3a8a8 Mon Sep 17 00:00:00 2001 From: xiaohuihui-bzwx-kj Date: Thu, 11 Apr 2024 23:40:19 -0700 Subject: [PATCH] drivers: add Chengdu BeiZhongWangXin Technology N5/N6 Series Network Card Driver bzwx inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9EHCB CVE: NA -------------------------------- bzwx ne6x/ne6xvf drivers provide Ethernet features for Chengdu BeiZhongWangXin N5/N6 series NICs. Signed-off-by: xiaohuihui-bzwx-kj --- arch/arm64/configs/openeuler_defconfig | 4 + arch/x86/configs/openeuler_defconfig | 4 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/bzwx/Kconfig | 21 + drivers/net/ethernet/bzwx/Makefile | 6 + drivers/net/ethernet/bzwx/nce/Kconfig | 36 + drivers/net/ethernet/bzwx/nce/Makefile | 32 + drivers/net/ethernet/bzwx/nce/comm/common.h | 262 ++ drivers/net/ethernet/bzwx/nce/comm/feature.h | 77 + drivers/net/ethernet/bzwx/nce/comm/mailbox.h | 147 + drivers/net/ethernet/bzwx/nce/comm/reg.h | 255 ++ .../net/ethernet/bzwx/nce/comm/trace_comm.h | 199 + drivers/net/ethernet/bzwx/nce/comm/txrx.c | 1566 ++++++++ drivers/net/ethernet/bzwx/nce/comm/txrx.h | 476 +++ drivers/net/ethernet/bzwx/nce/comm/version.h | 9 + drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h | 468 +++ .../net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c | 631 ++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h | 149 + .../net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c | 2397 ++++++++++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h | 69 + drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c | 1602 ++++++++ drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h | 319 ++ .../net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c | 1623 ++++++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h | 29 + .../ethernet/bzwx/nce/ne6x/ne6x_interrupt.c | 700 ++++ .../ethernet/bzwx/nce/ne6x/ne6x_interrupt.h | 27 + .../net/ethernet/bzwx/nce/ne6x/ne6x_main.c | 3112 ++++++++++++++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c | 250 ++ .../net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h | 39 + .../net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h | 36 + .../net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c | 171 + .../net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h | 14 + drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c | 1620 ++++++++ drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h | 249 ++ .../net/ethernet/bzwx/nce/ne6x/ne6x_trace.h | 28 + .../net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c | 445 +++ .../net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h | 11 + .../ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c | 2388 ++++++++++++ .../ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h | 163 + .../ethernet/bzwx/nce/ne6x_vf/ne6x_trace.h | 28 + .../net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h | 555 +++ .../bzwx/nce/ne6x_vf/ne6xvf_debugfs.c | 305 ++ .../bzwx/nce/ne6x_vf/ne6xvf_ethtool.c | 846 +++++ .../bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h | 23 + .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c | 3303 +++++++++++++++++ .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h | 23 + .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c | 160 + .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h | 11 + .../bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c | 1123 ++++++ .../bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h | 123 + 51 files changed, 26136 insertions(+) create mode 100644 drivers/net/ethernet/bzwx/Kconfig create mode 100644 drivers/net/ethernet/bzwx/Makefile create mode 100644 drivers/net/ethernet/bzwx/nce/Kconfig create mode 100644 drivers/net/ethernet/bzwx/nce/Makefile create mode 100644 drivers/net/ethernet/bzwx/nce/comm/common.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/feature.h 
create mode 100644 drivers/net/ethernet/bzwx/nce/comm/mailbox.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/reg.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/trace_comm.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/txrx.c create mode 100644 drivers/net/ethernet/bzwx/nce/comm/txrx.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/version.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_trace.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6x_trace.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 3909c51c3133..471f87e6ab88 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -3063,6 +3063,10 @@ CONFIG_NGBE=m CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set +CONFIG_NET_VENDOR_BZWX=y +CONFIG_NCE=m +CONFIG_NE6X=m +CONFIG_NE6XVF=m # CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_NET_SB1000 is not set diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index f15f089419e6..a3581b09d4e4 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -3061,6 +3061,10 @@ 
 CONFIG_NGBE=m
 CONFIG_TXGBE=m
 # CONFIG_NET_VENDOR_WIZNET is not set
 # CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_NET_VENDOR_BZWX=y
+CONFIG_NCE=m
+CONFIG_NE6X=m
+CONFIG_NE6XVF=m
 # CONFIG_FDDI is not set
 # CONFIG_HIPPI is not set
 # CONFIG_NET_SB1000 is not set
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 466a4c1adeaf..a97f993eb7a1 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -194,5 +194,6 @@ source "drivers/net/ethernet/wangxun/Kconfig"
 source "drivers/net/ethernet/wiznet/Kconfig"
 source "drivers/net/ethernet/xilinx/Kconfig"
 source "drivers/net/ethernet/xircom/Kconfig"
+source "drivers/net/ethernet/bzwx/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c4d4c2ab1505..e87c480d0840 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -106,3 +106,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
 obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
 obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/
+obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/
diff --git a/drivers/net/ethernet/bzwx/Kconfig b/drivers/net/ethernet/bzwx/Kconfig
new file mode 100644
index 000000000000..5cc757ceba64
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# BeiZhongWangXin device configuration
+#
+
+config NET_VENDOR_BZWX
+	bool "BeiZhongWangXin devices"
+	default y
+	help
+	  If you have a network (Ethernet) card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about BeiZhongWangXin devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_BZWX
+
+source "drivers/net/ethernet/bzwx/nce/Kconfig"
+
+endif # NET_VENDOR_BZWX
diff --git a/drivers/net/ethernet/bzwx/Makefile b/drivers/net/ethernet/bzwx/Makefile
new file mode 100644
index 000000000000..05273f2858c5
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the BeiZhongWangXin network device drivers.
+#
+
+obj-$(CONFIG_NCE) += nce/
diff --git a/drivers/net/ethernet/bzwx/nce/Kconfig b/drivers/net/ethernet/bzwx/nce/Kconfig
new file mode 100644
index 000000000000..694c1108f8b4
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# BeiZhongWangXin device configuration
+
+
+config NCE
+	tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support"
+	depends on PCI
+	help
+	  This selects the drivers supporting BeiZhongWangXin Ethernet Connection N5/N6 Series devices.
+
+if NCE
+
+config NE6X
+	tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support"
+	default n
+	depends on PCI_MSI
+	help
+	  This driver supports BeiZhongWangXin Ethernet Connection N5/N6 Series
+	  devices.
+
+	  To compile this driver as a module, choose M here.
+	  The module will be called ncepf.
+
+config NE6XVF
+	tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function support"
+	depends on PCI_MSI
+	depends on NE6X
+	help
+	  This driver supports the virtual functions of BeiZhongWangXin Ethernet
+	  Connection N5/N6 Series devices.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called ncevf.
+ +endif #NCE diff --git a/drivers/net/ethernet/bzwx/nce/Makefile b/drivers/net/ethernet/bzwx/nce/Makefile new file mode 100644 index 000000000000..82d06b396139 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/Makefile @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the BeiZhongWangXin network device drivers. +# + +subdir-ccflags-y += -I$(src)/comm +subdir-ccflags-y += -I$(src)/ne6x +subdir-ccflags-y += -I$(src)/ne6x_vf + +obj-$(CONFIG_NE6X) += ncepf.o +ncepf-objs := comm/txrx.o \ + ne6x/ne6x_main.o \ + ne6x/ne6x_ethtool.o \ + ne6x/ne6x_procfs.o \ + ne6x/ne6x_netlink.o \ + ne6x/ne6x_interrupt.o \ + ne6x/ne6x_reg.o \ + ne6x/ne6x_dev.o \ + ne6x/ne6x_txrx.o + +ncepf-$(CONFIG_DEBUG_FS) += ne6x/ne6x_debugfs.o +ncepf-$(CONFIG_PCI_IOV) += ne6x/ne6x_virtchnl_pf.o +ncepf-$(CONFIG_RFS_ACCEL) += ne6x/ne6x_arfs.o + +obj-$(CONFIG_NE6XVF) += ncevf.o +ncevf-objs := comm/txrx.o \ + ne6x_vf/ne6xvf_main.o \ + ne6x_vf/ne6xvf_ethtool.o \ + ne6x_vf/ne6xvf_virtchnl.o \ + ne6x_vf/ne6xvf_txrx.o + +ncevf-$(CONFIG_DEBUG_FS) += ne6x_vf/ne6xvf_debugfs.o diff --git a/drivers/net/ethernet/bzwx/nce/comm/common.h b/drivers/net/ethernet/bzwx/nce/comm/common.h new file mode 100644 index 000000000000..b3c35edbf124 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/common.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_COMMON_H +#define _NE6X_COMMON_H + +#define NE6X_MAX_U64 0xFFFFFFFFFFFFFFFFULL + +#define NE6X_MODULE_TYPE_TOTAL_BYTE 3 + +#define NE6X_AQ_LINK_UP 0x1ULL +#define NE6X_AQ_AN_COMPLETED BIT(0) + +#define PCI_VENDOR_ID_BZWX 0xD20C + +struct ne6x_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_miss; + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ + u64 rx_malform; + u64 tx_malform; +}; + +enum ne6x_phy_type { + NE6X_PHY_TYPE_UNKNOWN = 0, + NE6X_PHY_TYPE_10GBASE = 1, + NE6X_PHY_TYPE_25GBASE, + NE6X_PHY_TYPE_40GBASE, + NE6X_PHY_TYPE_100GBASE, + NE6X_PHY_TYPE_200GBASE, +}; + +#define NE6X_LINK_SPEED_10GB_SHIFT 0x1 +#define NE6X_LINK_SPEED_40GB_SHIFT 0x2 +#define NE6X_LINK_SPEED_25GB_SHIFT 0x3 +#define NE6X_LINK_SPEED_100GB_SHIFT 0x4 +#define NE6X_LINK_SPEED_200GB_SHIFT 0x5 + +enum ne6x_sdk_link_speed { + NE6X_LINK_SPEED_UNKNOWN = 0, + NE6X_LINK_SPEED_10GB = BIT(NE6X_LINK_SPEED_10GB_SHIFT), + NE6X_LINK_SPEED_40GB = BIT(NE6X_LINK_SPEED_40GB_SHIFT), + NE6X_LINK_SPEED_25GB = BIT(NE6X_LINK_SPEED_25GB_SHIFT), + NE6X_LINK_SPEED_100GB = BIT(NE6X_LINK_SPEED_100GB_SHIFT), + NE6X_LINK_SPEED_200GB = BIT(NE6X_LINK_SPEED_200GB_SHIFT), +}; + +struct ne6x_link_status { + u64 phy_type_low; + u64 phy_type_high; + + u16 max_frame_size; + u16 req_speeds; + u8 topo_media_conflict; + u8 link_cfg_err; + u8 lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 fec_info; + u8 pacing; + u32 link_speed; + u8 module_type[NE6X_MODULE_TYPE_TOTAL_BYTE]; +}; + +struct ne6x_mac_info { + u8 perm_addr[ETH_ALEN]; +}; + +struct ne6x_link_info { + u32 link; + u32 speed; +}; + +enum ne6x_media_type { + NE6X_MEDIA_UNKNOWN = 0, + NE6X_MEDIA_FIBER, + NE6X_MEDIA_BASET, + NE6X_MEDIA_BACKPLANE, + NE6X_MEDIA_DA, + NE6X_MEDIA_AUI, +}; + +struct ne6x_phy_info { + struct ne6x_link_status link_info; + struct 
ne6x_link_status link_info_old; + u64 phy_type_low; + u64 phy_type_high; + enum ne6x_media_type media_type; + u8 get_link_info; + u16 curr_user_speed_req; +}; + +struct ne6x_port_info { + struct ne6x_hw *hw; /* back pointer to HW instance */ + + u8 lport; + u8 hw_port_id; /* hardware port id */ + u8 hw_trunk_id; + u32 hw_queue_base_old; + u32 hw_queue_base; + u32 hw_max_queue; + + u32 queue; /* current used queue */ + struct ne6x_link_info link_status; + struct ne6x_mac_info mac; + struct ne6x_phy_info phy; +}; + +struct ne6x_bus_info { + u16 domain_num; + u16 device; + u8 func; + u8 bus_num; +}; + +struct ne6x_mbx_snap_buffer_data { + u8 state : 4; + u8 len : 4; + u8 type; + u8 data[6]; +}; + +/* Structure to track messages sent by VFs on mailbox: + * 1. vf_cntr : a counter array of VFs to track the number of + * asynchronous messages sent by each VF + * 2. vfcntr_len : number of entries in VF counter array + */ +struct ne6x_mbx_vf_counter { + u32 *vf_cntr; + u32 vfcntr_len; +}; + +/* Enum defining the different states of the mailbox snapshot in the + * PF-VF mailbox overflow detection algorithm. The + * snapshot can be in + * states: + * 1. NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot + * within + * the mailbox buffer. + * 2. NE6X_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snaphot + * 3. + * NE6X_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the + * mailbox and mark any VFs sending more + * messages than the threshold limit set. + * 4. NE6X_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to + * 0xFFFFFFFF. + */ +enum ne6x_mbx_snapshot_state { + NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0, + NE6X_MAL_VF_DETECT_STATE_TRAVERSE, + NE6X_MAL_VF_DETECT_STATE_DETECT, + NE6X_MAL_VF_DETECT_STATE_INVALID = 0xF, +}; + +struct ne6x_mbx_snapshot { + enum ne6x_mbx_snapshot_state state; + struct ne6x_mbx_vf_counter mbx_vf; +}; + +enum virtchnl_vf_config_codes { + VIRTCHNL_VF_CONFIG_TRUST = 0, + VIRTCHNL_VF_CONFIG_FORCE_LINK = 1, +}; + +struct virtchnl_vf_config { + u8 type; + u8 data[5]; +}; + +enum ne6x_adapter_state { + NE6X_ADPT_DOWN, + NE6X_ADPT_NEEDS_RESTART, + NE6X_ADPT_NETDEV_ALLOCD, + NE6X_ADPT_NETDEV_REGISTERED, + NE6X_ADPT_UMAC_FLTR_CHANGED, + NE6X_ADPT_MMAC_FLTR_CHANGED, + NE6X_ADPT_VLAN_FLTR_CHANGED, + NE6X_ADPT_PROMISC_CHANGED, + NE6X_ADPT_RELEASING, + NE6X_ADPT_RECOVER, + NE6X_ADPT_DOWN_REQUESTED, + NE6X_ADPT_OPEN, + NE6X_ADPT_NBITS /* must be last */ +}; + +struct ne6x_adapt_comm { + u16 port_info; + DECLARE_BITMAP(state, NE6X_ADPT_NBITS); +}; + +struct ne6x_vlan { + u16 tpid; + u16 vid; + u8 prio; +}; + +struct ne6x_vf_vlan { + u16 vid; + u16 tpid; +}; + +struct ne6x_macvlan { + struct list_head list; + struct net_device *vdev; + u8 mac[ETH_ALEN]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_RSS_HASH_TYPE_NONE = 0x0, + NE6X_RSS_HASH_TYPE_IPV4 = 0x01, + NE6X_RSS_HASH_TYPE_IPV4_TCP = 0x02, + NE6X_RSS_HASH_TYPE_IPV6 = 0x04, + NE6X_RSS_HASH_TYPE_IPV6_TCP = 0x08, + NE6X_RSS_HASH_TYPE_IPV4_UDP = 0x10, + NE6X_RSS_HASH_TYPE_IPV6_UDP = 0x20, +}; + +enum { + NE6X_RSS_HASH_FUNC_NONE = 0x0, + NE6X_RSS_HASH_FUNC_TOEPLITZ = 0x01, +}; + +#define NE6X_RSS_MAX_KEY_SIZE 40 +#define NE6X_RSS_MAX_IND_TABLE_SIZE 128 + +struct ne6x_rss_info { + u16 hash_type; + u16 hash_func; + u16 hash_key_size; + u16 ind_table_size; + u8 hash_key[NE6X_RSS_MAX_KEY_SIZE]; + u8 ind_table[NE6X_RSS_MAX_IND_TABLE_SIZE]; +}; + +#define NE6X_VF_VLAN(vid, tpid) ((struct ne6x_vf_vlan){vid, tpid}) + +#ifndef readq +static inline u64 readq(void __iomem 
*addr) +{ + return readl(addr) + ((u64)readl(addr + 4) << 32); +} + +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/feature.h b/drivers/net/ethernet/bzwx/nce/comm/feature.h new file mode 100644 index 000000000000..482b4d2d1d39 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/feature.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_FEATURE_H +#define _NE6X_FEATURE_H + +#define NE6X_F_RSS BIT(0) +#define NE6X_F_PROMISC BIT(1) +#define NE6X_F_RX_IPV4_CKSUM BIT(2) +#define NE6X_F_RX_UDP_CKSUM BIT(3) +#define NE6X_F_RX_TCP_CKSUM BIT(4) +#define NE6X_F_RX_SCTP_CKSUM BIT(5) +#define NE6X_F_RX_VLAN_STRIP BIT(6) +#define NE6X_F_RX_QINQ_STRIP BIT(7) +#define NE6X_F_RX_VLAN_FILTER BIT(8) +#define NE6X_F_LRO BIT(9) +#define NE6X_F_RX_DISABLE BIT(10) +#define NE6X_F_RX_FW_LLDP BIT(11) +#define NE6X_F_RX_ALLMULTI BIT(12) +#define NE6X_F_FLOW_STEERING BIT(15) +#define NE6X_F_TX_VLAN BIT(16) +#define NE6X_F_TX_IP_CKSUM BIT(17) +#define NE6X_F_TX_TCP_CKSUM BIT(18) +#define NE6X_F_TX_UDP_CKSUM BIT(19) +#define NE6X_F_TX_SCTP_CKSUM BIT(20) +#define NE6X_F_TX_TCP_SEG BIT(21) +#define NE6X_F_TX_UDP_SEG BIT(22) +#define NE6X_F_TX_QINQ BIT(23) +#define NE6X_F_TX_NIC_SWITCH BIT(24) +#define NE6X_F_TX_MAC_LEARN BIT(25) +#define NE6X_F_TX_DISABLE BIT(26) +#define NE6X_F_TX_QOSBANDWIDTH BIT(27) +#define NE6X_F_TX_UDP_TNL_SEG BIT(28) +#define NE6X_F_TX_UDP_TNL_CSUM BIT(29) + +#define NE6X_OFFLOAD_RSS NE6X_F_RSS +#define NE6X_OFFLOAD_RXCSUM (NE6X_F_RX_IPV4_CKSUM | \ + NE6X_F_RX_UDP_CKSUM | \ + NE6X_F_RX_TCP_CKSUM | \ + NE6X_F_RX_SCTP_CKSUM) +#define NE6X_OFFLOAD_TXCSUM (NE6X_F_TX_IP_CKSUM | \ + NE6X_F_TX_TCP_CKSUM | \ + NE6X_F_TX_UDP_CKSUM | \ + NE6X_F_TX_UDP_TNL_CSUM) + +#define NE6X_OFFLOAD_LRO NE6X_F_LRO +#define NE6X_OFFLOAD_TSO NE6X_F_TX_TCP_SEG +#define NE6X_OFFLOAD_UFO NE6X_F_TX_UDP_SEG +#define NE6X_OFFLOAD_SCTP_CSUM NE6X_F_TX_SCTP_CKSUM + +#define NE6X_OFFLOAD_RXD_VLAN (NE6X_F_RX_VLAN_STRIP | \ + NE6X_F_RX_QINQ_STRIP | \ + NE6X_F_RX_VLAN_FILTER) +#define NE6X_OFFLOAD_TXD_VLAN (NE6X_F_TX_VLAN | NE6X_F_TX_QINQ) +#define NE6X_OFFLOAD_L2 NE6X_F_TX_NIC_SWITCH + +#define NE6X_F_SMART_ENABLED BIT(0) +#define NE6X_F_SRIOV_ENABLED BIT(1) +#define NE6X_F_SWITCH_ENABLED BIT(2) +#define NE6X_F_L2FDB_LEARN_ENABLED BIT(3) +#define NE6X_F_VLAN_ENABLED BIT(4) +#define NE6X_F_WHITELIST_ENABLED BIT(5) +#define NE6X_F_DDOS_ENABLED BIT(6) +#define NE6X_F_TRUST_VLAN_ENABLED BIT(7) +#define NE6X_F_S_ROCE_ICRC_ENABLED BIT(8) + +#define NE6X_F_ACK_FLOOD BIT(0) +#define NE6X_F_PUSH_ACK_FLOOD BIT(1) +#define NE6X_F_SYN_ACK_FLOOD BIT(2) +#define NE6X_F_FIN_FLOOD BIT(3) +#define NE6X_F_RST_FLOOD BIT(4) +#define NE6X_F_PUSH_SYN_ACK_FLOOD BIT(5) +#define NE6X_F_UDP_FLOOD BIT(6) +#define NE6X_F_ICMP_FLOOD BIT(7) +#define NE6X_F_FRAGMENT_FLOOD BIT(8) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/mailbox.h b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h new file mode 100644 index 000000000000..85ae76b1321f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. 
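Looking back at the RSS definitions completed in common.h above, here is a minimal sketch of how an adapter might fill struct ne6x_rss_info before handing it to firmware; the hash-type mix, the round-robin indirection table and the helper name are illustrative assumptions, not the driver's actual defaults.

static void ne6x_example_fill_rss(struct ne6x_rss_info *rss, u16 num_queues)
{
	int i;

	/* assumed selection: Toeplitz hashing over TCP/IPv4 and TCP/IPv6 */
	rss->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ;
	rss->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | NE6X_RSS_HASH_TYPE_IPV6_TCP;
	rss->hash_key_size = NE6X_RSS_MAX_KEY_SIZE;
	rss->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE;

	/* random key (needs <linux/netdevice.h>) and a table spread over
	 * num_queues queues, which is assumed to be non-zero
	 */
	netdev_rss_key_fill(rss->hash_key, rss->hash_key_size);
	for (i = 0; i < rss->ind_table_size; i++)
		rss->ind_table[i] = i % num_queues;
}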
+ */
+
+#ifndef _NE6X_COMM_MAILBOX_H
+#define _NE6X_COMM_MAILBOX_H
+
+enum virtchnl_ops {
+	VIRTCHNL_OP_UNKNOWN = 0,
+	VIRTCHNL_OP_VERSION = 1,
+	VIRTCHNL_OP_RESET_VF = 2,
+	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+	VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+	VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+	VIRTCHNL_OP_CONFIG_ADPT_QUEUES = 6,
+	VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+	VIRTCHNL_OP_ENABLE_QUEUES = 8,
+	VIRTCHNL_OP_DISABLE_QUEUES = 9,
+	VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+	VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+	VIRTCHNL_OP_ADD_VLAN = 12,
+	VIRTCHNL_OP_DEL_VLAN = 13,
+	/* promiscuous mode / unicast promisc / multicast promisc */
+	VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+	VIRTCHNL_OP_EVENT = 17, /* link state */
+	VIRTCHNL_OP_SET_VF_ADDR = 18,
+	VIRTCHNL_OP_VF_CONFIG = 19,
+	VIRTCHNL_OP_CONFIG_OFFLOAD = 27,
+	VIRTCHNL_OP_GET_VF_FEATURE = 28,
+	VIRTCHNL_OP_REQUEST_QUEUES = 29,
+	VIRTCHNL_OP_CONFIG_RSS = 30,
+	VIRTCHNL_OP_GET_PORT_STATUS = 31,
+	VIRTCHNL_OP_CHANGED_RSS = 32,
+	VIRTCHNL_OP_SET_VF_STATE = 33,
+	VIRTCHNL_OP_SET_FAST_MDOE = 34,
+	VIRTCHNL_OP_CONFIG_VLAN = 40,
+	VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD = 41,
+	VIRTCHNL_OP_CONFIG_MTU = 42,
+	VIRTCHNL_OP_CONFIG_FLOW_CTRL = 43,
+
+	VIRTCHNL_OP_MAX,
+};
+
+static char local_error_buffer[64];
+static inline const char *ne6x_opcode_str(enum virtchnl_ops opcode)
+{
+	switch (opcode) {
+	case VIRTCHNL_OP_VERSION:
+		return "__OPCODE_GET_VERSION";
+	case VIRTCHNL_OP_RESET_VF:
+		return "__OPCODE_RESET_VF";
+	case VIRTCHNL_OP_GET_VF_RESOURCES:
+		return "__OPCODE_GET_VF_RESOURCES";
+	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		return "__OPCODE_CONFIG_TX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		return "__OPCODE_CONFIG_RX_QUEUE";
+	case VIRTCHNL_OP_CONFIG_ADPT_QUEUES:
+		return "__OPCODE_CONFIG_ADPT_QUEUES";
+	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		return "__OPCODE_CONFIG_IRQ_MAP";
+	case VIRTCHNL_OP_ENABLE_QUEUES:
+		return "__OPCODE_ENABLE_QUEUES";
+	case VIRTCHNL_OP_DISABLE_QUEUES:
+		return "__OPCODE_DISABLE_QUEUES";
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+		return "__OPCODE_ADD_ETH_ADDR";
+	case VIRTCHNL_OP_DEL_ETH_ADDR:
+		return "__OPCODE_DEL_ETH_ADDR";
+	case VIRTCHNL_OP_ADD_VLAN:
+		return "__OPCODE_ADD_VLAN";
+	case VIRTCHNL_OP_DEL_VLAN:
+		return "__OPCODE_DEL_VLAN";
+	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		return "__OPCODE_CONFIG_PROMISCUOUS_MODE";
+	case VIRTCHNL_OP_EVENT:
+		return "__OPCODE_EVENT";
+	case VIRTCHNL_OP_CONFIG_RSS:
+		return "__OPCODE_CONFIG_RSS";
+	case VIRTCHNL_OP_CHANGED_RSS:
+		return "__OP_CHANGED_RSS";
+	case VIRTCHNL_OP_CONFIG_OFFLOAD:
+		return "__OPCODE_CONFIGURE_OFFLOAD";
+	case VIRTCHNL_OP_GET_VF_FEATURE:
+		return "VIRTCHNL_OP_GET_VF_FEATURE";
+	case VIRTCHNL_OP_REQUEST_QUEUES:
+		return "__OPCODE_REQUEST_QUEUES";
+	case VIRTCHNL_OP_GET_PORT_STATUS:
+		return "__OP_GET_PORT_STATUS";
+	case VIRTCHNL_OP_SET_VF_ADDR:
+		return "__OPCODE_SET_VF_ADDR";
+	case VIRTCHNL_OP_VF_CONFIG:
+		return "__VIRTCHNL_OP_VF_CONFIG";
+	case VIRTCHNL_OP_SET_VF_STATE:
+		return "__VIRTCHNL_OP_SET_VF_STATE";
+	case VIRTCHNL_OP_SET_FAST_MDOE:
+		return "__VIRTCHNL_OP_SET_FAST_MDOE";
+	case VIRTCHNL_OP_CONFIG_VLAN:
+		return "__VIRTCHNL_OP_CONFIG_VLAN";
+	case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD:
+		return "__VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD";
+	case VIRTCHNL_OP_CONFIG_MTU:
+		return "__VIRTCHNL_OP_CONFIG_MTU";
+	case VIRTCHNL_OP_CONFIG_FLOW_CTRL:
+		return "__VIRTCHNL_OP_CONFIG_FLOW_CTRL";
+	default:
+		snprintf(local_error_buffer, sizeof(local_error_buffer),
+			 "__OPCODE_UNKNOWN_OPCODE(%d)", opcode);
+		return local_error_buffer;
+	}
+}
+
+/* Error Codes */
+enum virtchnl_status_code {
+	VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +static inline const char *ne6x_mbox_status_str(enum virtchnl_status_code opcode) +{ + switch (opcode) { + case VIRTCHNL_STATUS_SUCCESS: + return "__STATUS_SUCCESS"; + case VIRTCHNL_STATUS_ERR_PARAM: + return "__STATUS_ERR_PARAM"; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return "__STATUS_ERR_NO_MEMORY"; + case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: + return "__STATUS_ERR_OPCODE_MISMATCH"; + case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: + return "__STATUS_ERR_CQP_COMPL_ERROR"; + case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: + return "__STATUS_ERR_INVALID_VF_ID"; + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return "__STATUS_ERR_ADMIN_QUEUE_ERROR"; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return "__STATUS_ERR_NOT_SUPPORTED"; + default: + return "__STATUS_UNKNOWN"; + } +} + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/reg.h b/drivers/net/ethernet/bzwx/nce/comm/reg.h new file mode 100644 index 000000000000..15a745bb06f3 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/reg.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_COMM_REG_H +#define _NE6X_COMM_REG_H + +#include + +#define NE6X_BAR2_VP_TDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (0 << 11) | (((__reg) & 0xff) << 3)) +#define NE6X_BAR2_VP_RDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (1 << 11) | (((__reg) & 0xff) << 3)) + +/* CIU */ +#define NE6X_VP_BASE_ADDR 0x0 +#define NE6X_VPINT_DYN_CTLN(_VPID, _OFFSET) \ + (((_VPID) << 12) + ((_OFFSET) << 4)) /* _i=0...64 * Reset: PFR */ +#define NE6X_PF_BASE_ADDR 0x138ULL +#define NE6X_PFINT_DYN_CTLN(_PFID, _OFFSET) \ + (((NE6X_PF_BASE_ADDR + (_PFID)) << 12) + ((_OFFSET) << 4)) + /* _i=0...7 */ /* Reset: PFR */ + +#define NE6X_VP_INT 0x00 +#define NE6X_VP_INT_SET 0x01 +#define NE6X_VP_INT_MASK 0x02 +#define NE6X_VP_CQ_INTSHIFT 16 +#define NE6X_CQ_BASE_ADDR 0x03 +#define NE6X_CQ_HD_POINTER 0x04 +#define NE6X_CQ_CFG 0x05 +#define NE6X_RQ_BASE_ADDR 0x07 +#define NE6X_RQ_CFG 0x08 +#define NE6X_RQ_TAIL_POINTER 0x09 +#define NE6X_VP_RELOAD 0x0a +#define NE6X_SQ_BASE_ADDR 0x0b +#define NE6X_SQ_CFG 0x0c +#define NE6X_SQ_TAIL_POINTER 0x0d +#define NE6X_CQ_TAIL_POINTER 0x11 +#define NE6X_RQ_BUFF_OFST 0x12 +#define NE6X_RQ_HD_POINTER 0x13 +#define NE6X_SQ_BUFF_OFST 0x14 +#define NE6X_SQ_HD_POINTER 0x15 +#define NE6X_RQ_OFST 0x16 +#define NE6X_SQ_OFST 0x17 +#define NE6X_RQ_BLOCK_CFG 0x1b +#define NE6X_SQ_METER_CFG0 0x1c +#define NE6X_SQ_METER_CFG1 0x1d +#define NE6X_SQ_METER_CFG2 0x1e +#define NE6X_SQ_METER_CFG3 0x1f +#define NE6X_INT_CFG 0x21 +#define NE6X_CIU_TIME_OUT_CFG 0x45 +#define NE6X_ALL_CQ_CFG 0x46 +#define NE6X_ALL_SQ_CFG 0x47 +#define NE6X_ALL_RQ_CFG 0x48 +#define NE6X_MERGE_CFG 0x49 +#define NE6X_BFD_RECV_CNT 0x4a +#define NE6X_ETH_RECV_CNT 0x4b + +#define NE6X_PF_CON_ADDR(_OFST) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((_OFST) << 4)) +#define NE6X_PF_MAILBOX_DATA 0x40 +#define NE6X_VF_MAILBOX_DATA 0x80 +#define NE6X_PF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_PF_MAILBOX_DATA + (_VP)) << 4)) +#define NE6X_VF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_VF_MAILBOX_DATA + (_VP)) << 4)) +#define NE6X_PF_DB_INT_REQ 0xC0 +#define NE6X_PF_DB_INT_ACK 0xC1 +#define 
NE6X_PF_DB_DREQ_INT 0xC2 +#define NE6X_PF_DB_DREQ_INT_SET 0xC3 +#define NE6X_PF_DB_DREQ_INT_MASK 0xC4 +#define NE6X_PF_DB_DACK_INT 0xC5 +#define NE6X_PF_DB_DACK_INT_SET 0xC6 +#define NE6X_PF_DB_DACK_INT_MASK 0xC7 + +union ne6x_vp_int { + struct vp_int { + u64 csr_ciu_int_vp : 64; + } reg; + u64 val; +}; + +union ne6x_vp_int_mask { + struct vp_int_mask { + u64 csr_ciu_mask_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_base_addr { + struct cq_base_addr { + u64 csr_cq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_cfg { + struct cq_cfg { + u64 csr_cq_len_vp : 16; + u64 csr_cq_merge_time_vp : 16; + u64 csr_cq_merge_size_vp : 4; + u64 rsv0 : 28; + } reg; + u64 val; +}; + +union ne6x_rq_base_addr { + struct rq_base_addr { + u64 csr_rq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_rq_cfg { + struct rq_cfg { + u64 csr_rq_len_vp : 16; + u64 csr_rdq_pull_en : 1; + u64 csr_rqevt_write_back_vp : 1; + u64 csr_recv_pd_type_vp : 2; + u64 csr_recv_pd_revers_en : 1; + u64 rsv0 : 11; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_base_addr { + struct sq_base_addr { + u64 csr_sq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_sq_cfg { + struct sq_cfg { + u64 csr_sq_len_vp : 16; + u64 csr_tdq_pull_en : 1; + u64 csr_sqevt_write_back_vp : 1; + u64 csr_send_pd_revers_en : 1; + u64 rsv0 : 13; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_rq_block_cfg { + struct rq_block_cfg { + u64 csr_rdq_mop_len : 16; + u64 csr_rdq_sop_len : 16; + u64 rsv0 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg0 { + struct sq_meter_cfg0 { + u64 csr_meter_pkt_token_num_vp : 16; + u64 csr_meter_ipg_len_vp : 8; + u64 csr_meter_refresh_en_vp : 1; + u64 csr_meter_rate_limit_en_vp : 1; + u64 csr_meter_packet_mode_vp : 1; + u64 reserved : 37; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg1 { + struct sq_meter_cfg1 { + u64 csr_meter_refresh_count_vp : 28; + u64 reserved : 4; + u64 csr_meter_refresh_interval_vp : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg2 { + struct sq_meter_cfg2 { + u64 csr_meter_resume_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg3 { + struct sq_meter_cfg3 { + u64 csr_meter_pause_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_int_cfg { + struct int_cfg { + u64 csr_sq_hdle_half_int_cnt_vp : 16; + u64 csr_rq_hdle_half_int_cnt_vp : 16; + u64 csr_cq_hdle_half_int_cnt_vp : 16; + u64 rsv0 : 16; + } reg; + u64 val; +}; + +union ne6x_ciu_time_out_cfg { + struct ciu_time_out_cfg { + u64 csr_int_timer_out_cnt : 12; + u64 rsv0 : 52; + } reg; + u64 val; +}; + +union ne6x_all_cq_cfg { + struct all_cq_cfg { + u64 csr_allcq_merge_size : 4; + u64 rsv0 : 4; + u64 csr_allcq_wt_rr_cnt : 7; + u64 csr_allcq_wt_rr_flag : 1; + u64 rsv1 : 48; + } reg; + u64 val; +}; + +union ne6x_all_sq_cfg { + struct all_sq_cfg { + u64 csr_allsq_wb_trigger_info : 8; + u64 csr_allsq_csum_zero_negate : 1; + u64 csr_allsq_pull_merge_cfg : 5; + u64 rsv0 : 50; + } reg; + u64 val; +}; + +union ne6x_all_rq_cfg { + struct all_rq_cfg { + u64 csr_allrq_wb_trigger_info : 8; + u64 csr_allrq_pull_merge_cfg : 5; + u64 rsv0 : 51; + } reg; + u64 val; +}; + +union ne6x_merge_cfg { + struct merge_cfg { + u64 csr_merge_clk_cnt : 16; + u64 rsv0 : 48; + } reg; + u64 val; +}; + +union ne6x_eth_recv_cnt { + struct eth_recv_cnt { + u64 csr_eth_pkt_drop_cnt : 32; + u64 csr_eth_rdq_drop_cnt : 32; + } reg; + u64 val; +}; + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/trace_comm.h 
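reg.h above exposes each CSR both as a raw u64 value and as a named bitfield view. A sketch of the resulting read-modify-write pattern, under two assumptions this header alone does not confirm: that the per-VP CIU offsets are addressed through NE6X_VPINT_DYN_CTLN() and that the mapped BAR is available as hw_addr (the real access helpers live in ne6x_reg.c, not shown here).

static void ne6x_example_set_sq_len(void __iomem *hw_addr, int vp, u16 len)
{
	union ne6x_sq_cfg cfg;

	/* read the 64-bit CSR, update one field through the bitfield view,
	 * then write the whole word back
	 */
	cfg.val = readq(hw_addr + NE6X_VPINT_DYN_CTLN(vp, NE6X_SQ_CFG));
	cfg.reg.csr_sq_len_vp = len;
	writeq(cfg.val, hw_addr + NE6X_VPINT_DYN_CTLN(vp, NE6X_SQ_CFG));
}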
b/drivers/net/ethernet/bzwx/nce/comm/trace_comm.h new file mode 100644 index 000000000000..8fc0e0a1f154 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/trace_comm.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#define _NE6X_TRACE_NAME(trace_name) (trace_##ne6x##_##trace_name) +#define NE6X_TRACE_NAME(trace_name) _NE6X_TRACE_NAME(trace_name) + +#define ne6x_trace(trace_name, args...) (NE6X_TRACE_NAME(trace_name) \ +(args)) + +#define ne6x_trace_enabled(trace_name) (NE6X_TRACE_NAME(trace_name##_enabled) \ +()) + +DECLARE_EVENT_CLASS(ne6x_tx_template, + TP_PROTO(struct ne6x_ring *ring, struct sk_buff *skb), + TP_ARGS(ring, skb), + + TP_STRUCT__entry(__field(void *, ring) + __field(u32, len) + __field(u32, head_len) + __dynamic_array(unsigned char, data, skb_headlen(skb)) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign(__entry->ring = ring; + __entry->len = skb->len; + __entry->head_len = skb_headlen(skb); + memcpy(__get_dynamic_array(data), skb->data, + skb_headlen(skb)); + __assign_str(devname, ring->netdev->name); + ), + + TP_printk("netdev: %s ring: %p skb_len: %d skb_headlen:%d skb_head: %s", + __get_str(devname), __entry->ring, __entry->len, + __entry->head_len, __print_array(__get_dynamic_array(data), + __get_dynamic_array_len(data), 1)) + ); + +DEFINE_EVENT(ne6x_tx_template, ne6x_tx_skb, + TP_PROTO(struct ne6x_ring *ring, struct sk_buff *skb), + TP_ARGS(ring, skb) +); + +DEFINE_EVENT(ne6x_tx_template, ne6x_tx_skb_jumbo, + TP_PROTO(struct ne6x_ring *ring, struct sk_buff *skb), + TP_ARGS(ring, skb) +); + +DECLARE_EVENT_CLASS(ne6x_tx_desc_template, + TP_PROTO(struct ne6x_ring *ring, struct ne6x_tx_desc *desc), + TP_ARGS(ring, desc), + + TP_STRUCT__entry(__field(void *, ring) + __field(void *, desc) + __field(u8, vp) + __field(u8, sop_valid) + __field(u8, eop_valid) + __field(u64, sop_cnt) + __field(u64, mop_cnt) + __field(u64, sop_addr) + __field(u64, mop_addr) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign(__entry->ring = ring; + __entry->desc = desc; + __entry->vp = desc->vp; + __entry->sop_valid = desc->sop_valid; + __entry->eop_valid = desc->eop_valid; + __entry->sop_cnt = desc->sop_cnt; + __entry->mop_cnt = desc->mop_cnt; + __entry->sop_addr = desc->buffer_sop_addr; + __entry->mop_addr = desc->buffer_mop_addr; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk("netdev: %s ring: %p desc: %p vp: %d sop_valid:%d eop_valid: %d sop_cnt: %llu mop_cnt: %llu sop_addr: %llu mop_addr: %llu", + __get_str(devname), __entry->ring, __entry->desc, + __entry->vp, __entry->sop_valid, __entry->eop_valid, + __entry->sop_cnt, __entry->mop_cnt, __entry->sop_addr, + __entry->mop_addr) + ); + +DEFINE_EVENT(ne6x_tx_desc_template, ne6x_tx_map_desc, + TP_PROTO(struct ne6x_ring *ring, struct ne6x_tx_desc *desc), + TP_ARGS(ring, desc) +); + +DEFINE_EVENT(ne6x_tx_desc_template, ne6x_tx_map_jumbo_desc, + TP_PROTO(struct ne6x_ring *ring, struct ne6x_tx_desc *desc), + TP_ARGS(ring, desc) +); + +DECLARE_EVENT_CLASS(ne6x_tx_tag_template, + TP_PROTO(struct ne6x_ring *ring, struct ne6x_tx_tag *tx_tag), + TP_ARGS(ring, tx_tag), + + TP_STRUCT__entry(__field(void *, ring) + __field(u8, pi) + __field(u8, vport) + __field(u16, vlan1) + __field(u16, vlan2) + __field(u16, mss) + __field(u16, tag_num) + __string(devname, ring->netdev->name) + ), + + TP_fast_assign(__entry->ring = ring; + __entry->pi = (tx_tag->tag_pi1 << 1) | tx_tag->tag_pi0; + __entry->vport = tx_tag->tag_vport; + 
__entry->vlan1 = tx_tag->tag_vlan1; + __entry->vlan2 = tx_tag->tag_vlan2; + __entry->mss = tx_tag->tag_mss; + __entry->tag_num = tx_tag->tag_num; + __assign_str(devname, ring->netdev->name); + ), + + TP_printk("netdev: %s ring: %p pi: %d vport: %d vlan1:%d vlan2: %d mss: %d tag_num: %d", + __get_str(devname), __entry->ring, __entry->pi, __entry->vport, + __entry->vlan1, __entry->vlan2, __entry->mss, __entry->tag_num) + ); + +DEFINE_EVENT(ne6x_tx_tag_template, ne6x_tx_map_tag, + TP_PROTO(struct ne6x_ring *ring, struct ne6x_tx_tag *tx_tag), + TP_ARGS(ring, tx_tag) +); + +DEFINE_EVENT(ne6x_tx_tag_template, ne6x_tx_map_jumbo_tag, + TP_PROTO(struct ne6x_ring *ring, struct ne6x_tx_tag *tx_tag), + TP_ARGS(ring, tx_tag) +); + +DECLARE_EVENT_CLASS(ne6x_rx_template, + TP_PROTO(struct ne6x_ring *ring, union ne6x_rx_desc *desc, struct sk_buff *skb), + TP_ARGS(ring, desc, skb), + TP_STRUCT__entry(__field(void *, ring) + __field(void *, desc) + __field(void *, skb) + __string(devname, ring->netdev->name)), + + TP_fast_assign(__entry->ring = ring; + __entry->desc = desc; + __entry->skb = skb; + __assign_str(devname, ring->netdev->name);), + TP_printk("netdev: %s ring: %p desc: %p skb %p", + __get_str(devname), __entry->ring, + __entry->desc, __entry->skb) +); + +DECLARE_EVENT_CLASS(ne6x_rx_head_template, + TP_PROTO(struct ne6x_ring *ring, struct rx_hdr_info *rx_hdr), + TP_ARGS(ring, rx_hdr), + TP_STRUCT__entry(__field(void *, rx_hdr) + __array(u8, headr, 12) + __field(void *, ring) + __string(devname, ring->netdev->name)), + + TP_fast_assign(__entry->ring = ring; + __entry->rx_hdr = rx_hdr; + memcpy(__entry->headr, rx_hdr, 12); + __assign_str(devname, ring->netdev->name);), + TP_printk("netdev: %s rx_hdr: %s", + __get_str(devname), __print_array(__entry->headr, 12, 1)) +); + +DEFINE_EVENT(ne6x_rx_head_template, ne6x_rx_hdr, + TP_PROTO(struct ne6x_ring *ring, struct rx_hdr_info *rx_hdr), + TP_ARGS(ring, rx_hdr) +); + +DEFINE_EVENT(ne6x_rx_template, ne6x_clean_rx_irq, + TP_PROTO(struct ne6x_ring *ring, union ne6x_rx_desc *desc, struct sk_buff *skb), + TP_ARGS(ring, desc, skb) +); + +DEFINE_EVENT(ne6x_rx_template, ne6x_clean_rx_irq_rx, + TP_PROTO(struct ne6x_ring *ring, union ne6x_rx_desc *desc, struct sk_buff *skb), + TP_ARGS(ring, desc, skb) +); + +DECLARE_EVENT_CLASS(ne6x_xmit_template, + TP_PROTO(struct sk_buff *skb, struct ne6x_ring *ring), + TP_ARGS(skb, ring), + TP_STRUCT__entry(__field(void *, skb) + __field(void *, ring) + __string(devname, ring->netdev->name)), + TP_fast_assign(__entry->skb = skb; + __entry->ring = ring; + __assign_str(devname, ring->netdev->name);), + TP_printk("netdev: %s skb: %p ring: %p", + __get_str(devname), __entry->skb, + __entry->ring)); + +DEFINE_EVENT(ne6x_xmit_template, ne6x_xmit_frame_ring, + TP_PROTO(struct sk_buff *skb, struct ne6x_ring *ring), + TP_ARGS(skb, ring)); + +DEFINE_EVENT(ne6x_xmit_template, ne6x_xmit_frame_ring_drop, + TP_PROTO(struct sk_buff *skb, struct ne6x_ring *ring), + TP_ARGS(skb, ring)); diff --git a/drivers/net/ethernet/bzwx/nce/comm/txrx.c b/drivers/net/ethernet/bzwx/nce/comm/txrx.c new file mode 100644 index 000000000000..e6ae2f689915 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.c @@ -0,0 +1,1566 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
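The ne6x_trace()/ne6x_trace_enabled() wrappers at the top of trace_comm.h expand to the usual trace_ne6x_<event>() and trace_ne6x_<event>_enabled() calls generated for the events above. A short usage sketch; the wrapper function name is hypothetical, the pattern mirrors the calls made in txrx.c below.

static void ne6x_example_trace_tx(struct ne6x_ring *tx_ring, struct sk_buff *skb)
{
	/* skip argument setup entirely when the tracepoint is disabled */
	if (ne6x_trace_enabled(tx_skb))
		ne6x_trace(tx_skb, tx_ring, skb);
}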
*/ + +#include "txrx.h" +#include "ne6x_trace.h" + +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_buf); + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + tx_ring->tx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_buf) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct ne6x_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + return -ENOMEM; +} + +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring) +{ + struct device *dev = cq_ring->dev; + + if (!dev) + return -ENOMEM; + + /* round up to nearest 4K */ + cq_ring->size = cq_ring->count * sizeof(struct ne6x_cq_desc); + cq_ring->size = ALIGN(cq_ring->size, 4096); + cq_ring->desc = dma_alloc_coherent(dev, cq_ring->size, &cq_ring->dma, GFP_KERNEL); + if (!cq_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + cq_ring->size); + goto err; + } + + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring) +{ + struct device *dev = tg_ring->dev; + + if (!dev) + return -ENOMEM; + + /* round up to nearest 4K */ + tg_ring->size = tg_ring->count * sizeof(struct ne6x_tx_tag); + tg_ring->size = ALIGN(tg_ring->size, 4096); + tg_ring->desc = dma_alloc_coherent(dev, tg_ring->size, &tg_ring->dma, GFP_KERNEL); + if (!tg_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tg_ring->size); + goto err; + } + + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int err = -ENOMEM; + int bi_size; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_buf); + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + rx_ring->rx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_buf) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ne6x_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + return err; +} + +int ne6x_setup_tx_sgl(struct ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + + if (!dev) + return -ENOMEM; + tx_ring->sgl = kzalloc(sizeof(*tx_ring->sgl), GFP_KERNEL); + + if (!tx_ring->sgl) + goto err; + + return 0; +err: + return -ENOMEM; +} + +static inline unsigned int ne6x_txd_use_count(unsigned int size) +{ + return ((size * 85) >> 20) + 1; +} + +bool __ne6x_chk_linearize(struct sk_buff *skb); +static inline bool ne6x_chk_linearize(struct sk_buff *skb, int count) 
+{ + /* Both TSO and single send will work if count is less than 8 */ + if (likely(count < NE6X_MAX_BUFFER_TXD)) + return false; + + if (skb_is_gso(skb)) + return __ne6x_chk_linearize(skb); + + /* we can support up to 8 data buffers for a single send */ + return count != NE6X_MAX_BUFFER_TXD; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size); + +static inline int ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + if (likely(NE6X_DESC_UNUSED(tx_ring) >= size)) + return 0; + + return __ne6x_maybe_stop_tx(tx_ring, size); +} + +static inline bool ne6x_rx_is_programming_status(u8 status) +{ + return status & 0x20; +} + +static void ne6x_reuse_rx_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct ne6x_rx_buf *new_buff; + + new_buff = &rx_ring->rx_buf[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static void ne6x_clean_programming_status(struct ne6x_ring *rx_ring, + union ne6x_rx_desc *rx_desc, + u8 status) +{ + u32 ntc = rx_ring->next_to_clean; + struct ne6x_rx_buf *rx_buffer; + + /* fetch, update, and store next to clean */ + rx_buffer = &rx_ring->rx_buf[ntc++]; + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* place unused page back on the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static struct ne6x_rx_buf *ne6x_get_rx_buffer(struct ne6x_ring *rx_ring, const unsigned int size) +{ + struct ne6x_rx_buf *rx_buffer; + + rx_buffer = &rx_ring->rx_buf[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void ne6x_add_rx_frag(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, + size, truesize); + + /* page is being used so we must update the page offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *ne6x_construct_skb(struct ne6x_ring *rx_ring, + struct ne6x_rx_buf *rx_buffer, + unsigned int size) +{ + void *page_addr = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES)); +#endif + + /* allocate a skb to store the frags */ + skb = 
__napi_alloc_skb(&rx_ring->q_vector->napi, NE6X_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > NE6X_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, page_addr, NE6X_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), page_addr, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset + headlen, size, + truesize); + + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + /* buffer is unused, reset bias back to rx_buffer */ + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static inline bool ne6x_page_is_reusable(struct page *page) +{ + return (page_to_nid(page) == numa_mem_id()) && !page_is_pfmemalloc(page); +} + +static bool ne6x_can_reuse_rx_page(struct ne6x_rx_buf *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* Is any reuse possible? */ + if (unlikely(!ne6x_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_count(page) - pagecnt_bias) > 1)) + return false; +#else +#define NE6X_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - NE6X_RXBUFFER_4096) + if (rx_buffer->page_offset > NE6X_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. + */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +static void ne6x_put_rx_buffer(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer) +{ + if (ne6x_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static inline bool ne6x_test_staterr(union ne6x_rx_desc *rx_desc, const u8 stat_err_bits) +{ + return !!(rx_desc->wb.u.val & stat_err_bits); +} + +static bool ne6x_is_non_eop(struct ne6x_ring *rx_ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define NE6X_RXD_EOF BIT(NE6X_RX_DESC_STATUS_EOF_SHIFT) + if (likely(ne6x_test_staterr(rx_desc, NE6X_RXD_EOF))) + return false; + + rx_ring->rx_stats.non_eop_descs++; + rx_desc->wb.u.val = 0; + + return true; +} + +static bool ne6x_cleanup_headers(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc) +{ + if (unlikely(ne6x_test_staterr(rx_desc, BIT(NE6X_RX_DESC_STATUS_ERR_SHIFT)))) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.rx_mem_error++; + return true; + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static inline void ne6x_rx_hash(struct ne6x_ring *ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_rss_hash) + skb_set_hash(skb, rx_hdr->rss_hash, PKT_HASH_TYPE_NONE); +} + +static inline void ne6x_rx_checksum(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc, + struct rx_hdr_info *rx_hdr) +{ + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 0; + skb_checksum_none_assert(skb); + + if (!(rx_ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_bad) { + rx_ring->rx_stats.csum_err++; + } else if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_good) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + } +} + +static inline void ne6x_process_skb_fields(struct ne6x_ring *rx_ring, + union ne6x_rx_desc *rx_desc, + struct sk_buff *skb, + struct rx_hdr_info *rx_hdr) +{ + netdev_features_t features = rx_ring->netdev->features; + bool non_zero_vlan = false; + + ne6x_rx_hash(rx_ring, rx_desc, skb, rx_hdr); + rx_hdr->vlan_tci = ntohs(rx_hdr->vlan_tci); + rx_hdr->vlan_tci_outer = ntohs(rx_hdr->vlan_tci_outer); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_vlan_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (rx_hdr->vlan_tci_outer)); + } + } + } else if (features & NETIF_F_HW_VLAN_STAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_qinq_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + (rx_hdr->vlan_tci_outer)); + } + } + } + + ne6x_rx_checksum(rx_ring, skb, rx_desc, rx_hdr); + skb_record_rx_queue(skb, rx_ring->queue_index); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ne6x_receive_skb(struct ne6x_ring *rx_ring, struct sk_buff *skb) +{ + struct ne6x_q_vector *q_vector = rx_ring->q_vector; + + napi_gro_receive(&q_vector->napi, skb); +} + +static bool ne6x_alloc_mapped_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + 
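	/* Note on the recycling scheme used by ne6x_add_rx_frag(),
	 * ne6x_can_reuse_rx_page() and this allocator: with PAGE_SIZE < 8192
	 * each Rx buffer occupies half a page and "page_offset ^= truesize"
	 * flips between the two halves, so a page can be handed back to
	 * hardware while the stack may still hold a reference to the other
	 * half; pagecnt_bias counts the references the driver still owns,
	 * and the page is only unmapped once it can no longer be reused.
	 */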
/* alloc new page for storage */ + page = dev_alloc_pages(ne6x_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, ne6x_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + NE6X_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ne6x_rx_pg_order(rx_ring)); + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +void ne6x_tail_update(struct ne6x_ring *ring, int val) +{ + int i; + + for (i = 0; i < NE6X_TAIL_REG_NUM; i++) + writeq(val, ring->tail + i); +} + +static inline void ne6x_release_rx_desc(struct ne6x_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ne6x_tail_update(rx_ring, val); +} + +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count) +{ + u16 ntu = rx_ring->next_to_use; + union ne6x_rx_desc *rx_desc; + struct ne6x_rx_buf *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + rx_desc = NE6X_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_buf[ntu]; + + do { + if (!ne6x_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->wb.u.val = 0; + rx_desc->w.buffer_mop_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->w.buffer_sop_addr = 0; + rx_desc->w.mop_mem_len = rx_ring->rx_buf_len; + rx_desc->wb.pkt_len = 0; + rx_desc->w.vp = rx_ring->reg_idx; + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = NE6X_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buf; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.u.val = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + return false; + +no_buffers: + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + +static void ne6x_get_rx_head_info(struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + skb_frag_t *frag; + void *page_addr; + u32 temp_len, i; + + if (skb->data_len == 0) { + memcpy(rx_hdr, &skb->data[skb->len - 16], sizeof(struct rx_hdr_info)); + } else { + if (skb_shinfo(skb)->nr_frags > 1) { + i = skb_shinfo(skb)->nr_frags - 1; + frag = &skb_shinfo(skb)->frags[i]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } else { + frag = &skb_shinfo(skb)->frags[0]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } + } +} + +static void ne6x_clean_tx_desc(struct ne6x_tx_desc *tx_desc, struct ne6x_ring *ring) +{ + if (tx_desc->u.flags.tx_drop_addr) + ring->tx_stats.tx_drop_addr++; + + if (tx_desc->u.flags.tx_ecc_err) + ring->tx_stats.tx_ecc_err++; + + if (tx_desc->u.flags.tx_pcie_read_err) { + ring->tx_stats.tx_pcie_read_err++; + dev_info(ring->dev, "**** tx_desc: flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d], sopv[%d], eopv[%d], tso[%d], l3chk[%d], l3oft[%d], l4chk[%d], l4oft[%d], pld[%d], mop[%d], sop[%d], mss[%d],mopa[%lld],sopa[%lld]\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, + tx_desc->tso, tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, + tx_desc->l4_ofst, tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + 
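	/* Note on the Rx metadata path handled by ne6x_get_rx_head_info()
	 * above: hardware appends a 16-byte struct rx_hdr_info trailer to
	 * every completed frame, possibly split across the last two page
	 * fragments; the helper reassembles it, and ne6x_clean_rx_irq()
	 * below consumes it for hash/VLAN/checksum hints before stripping
	 * it with pskb_trim(skb, skb->len - 16).
	 */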
} + + tx_desc->u.val = 0; + tx_desc->vp = 0; + tx_desc->event_trigger = 0; + tx_desc->chain = 0; + tx_desc->transmit_type = 0; + tx_desc->sop_valid = 0; + tx_desc->eop_valid = 0; + tx_desc->tso = 0; + tx_desc->l3_csum = 0; + tx_desc->l3_ofst = 0; + tx_desc->l4_csum = 0; + tx_desc->l4_ofst = 0; + tx_desc->pld_ofst = 0; + tx_desc->mop_cnt = 0; + tx_desc->sop_cnt = 0; + tx_desc->mss = 0; + tx_desc->buffer_mop_addr = 0; + tx_desc->buffer_sop_addr = 0; +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *clean_ring = NULL; + union ne6x_rx_desc *rx_desc = NULL; + int i, cq_num, off_idx, ntc; + int budget = napi_budget; + int last_expect = 0; + int total = 0; + + do { + cq_desc = NE6X_CQ_DESC(cq_ring, cq_ring->next_to_use); + cq_num = cq_desc->num; + if (!cq_num) + break; + + dma_rmb(); + cq_ring->stats.packets += cq_num; + + if (cq_desc->ctype) { + clean_ring = q_vector->rx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.rx_cq[i].cq_rx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_err(cq_ring->netdev, "ne6xpf: cqrx err, need debug! cq: %d, rx: %d\n", + off_idx, last_expect); + netdev_err(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, rxq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + rx_desc = NE6X_RX_DESC(clean_ring, off_idx); + rx_desc->wb.u.val = cq_desc->payload.rx_cq[i].cq_rx_stats; + rx_desc->wb.pkt_len = cq_desc->payload.rx_cq[i].cq_rx_len; + if (rx_desc->wb.pkt_len > clean_ring->rx_buf_len) { + if (!rx_desc->wb.u.flags.rx_eop) + rx_desc->wb.pkt_len = clean_ring->rx_buf_len; + else + rx_desc->wb.pkt_len = rx_desc->wb.pkt_len % + clean_ring->rx_buf_len ? + rx_desc->wb.pkt_len % + clean_ring->rx_buf_len : + clean_ring->rx_buf_len; + } + + last_expect++; + last_expect = (last_expect < clean_ring->count) ? last_expect : 0; + } + + cq_ring->cq_stats.rx_num += cq_num; + } else { + clean_ring = q_vector->tx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.tx_cq[i].cq_tx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_info(cq_ring->netdev, "ne6xpf: cqtx err, need debug! cq: %d, tx: %d\n", + off_idx, last_expect); + netdev_info(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, txq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + tx_desc = NE6X_TX_DESC(clean_ring, off_idx); + tx_desc->u.val = cq_desc->payload.tx_cq[i].cq_tx_stats; + last_expect++; + last_expect = (last_expect < clean_ring->count) ? last_expect : 0; + } + + cq_ring->cq_stats.tx_num += cq_num; + } + + clean_ring->cq_last_expect = last_expect; + cq_ring->cq_stats.cq_num++; + + /* clean cq desc */ + cq_desc->num = 0; + ntc = cq_ring->next_to_use + 1; + ntc = (ntc < cq_ring->count) ? 
ntc : 0; + cq_ring->next_to_use = ntc; + prefetch(NE6X_CQ_DESC(cq_ring, ntc)); + + budget--; + total++; + } while (likely(budget)); + + if (NE6X_DESC_UNUSED(cq_ring) < 1024) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + return total; +} + +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = NE6X_DESC_UNUSED(rx_ring); + struct ne6x_rx_buf *rx_buffer = NULL; + struct sk_buff *skb = rx_ring->skb; + union ne6x_rx_desc *rx_desc = NULL; + struct rx_hdr_info rx_hdr; + bool failure = false; + unsigned int size; + u8 rx_status; + + while (likely(total_rx_packets < (unsigned int)budget)) { + if (cleaned_count >= NE6X_RX_BUFFER_WRITE) { + failure = failure || ne6x_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = NE6X_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_status = rx_desc->wb.u.val; + if (!rx_status) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. + */ + dma_rmb(); + + if (unlikely(ne6x_rx_is_programming_status(rx_status))) { + rx_ring->rx_stats.rx_err++; + ne6x_clean_programming_status(rx_ring, rx_desc, rx_status); + cleaned_count++; + continue; + } + + size = rx_desc->wb.pkt_len; + ne6x_trace(clean_rx_irq, rx_ring, rx_desc, skb); + rx_buffer = ne6x_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + ne6x_add_rx_frag(rx_ring, rx_buffer, skb, size); + else + skb = ne6x_construct_skb(rx_ring, rx_buffer, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + ne6x_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + if (ne6x_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + if (ne6x_cleanup_headers(rx_ring, skb, rx_desc)) { + skb = NULL; + continue; + } + + ne6x_get_rx_head_info(skb, &rx_hdr); + ne6x_trace(rx_hdr, rx_ring, &rx_hdr); + pskb_trim(skb, skb->len - 16); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + ne6x_process_skb_fields(rx_ring, rx_desc, skb, &rx_hdr); + + ne6x_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); + + ne6x_receive_skb(rx_ring, skb); + skb = NULL; + + rx_desc->wb.u.val = 0; + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? 
budget : (int)total_rx_packets; +} + +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct ne6x_tx_desc *eop_desc = NULL; + u16 i = tx_ring->next_to_clean; + struct ne6x_tx_desc *tx_desc; + struct ne6x_tx_buf *tx_buf; + unsigned int budget = 256; + + tx_buf = &tx_ring->tx_buf[i]; + tx_desc = NE6X_TX_DESC(tx_ring, i); + + if (unlikely(tx_buf->jumbo_frame)) { + tx_buf->napi_budget += napi_budget; + if (!tx_buf->jumbo_finsh) + return !!budget; + + napi_budget = tx_buf->napi_budget; + } + + do { + eop_desc = tx_buf->next_to_watch; + if (!eop_desc) + break; + + prefetchw(&tx_buf->skb->users); + + if (!eop_desc->u.val) + break; + + dma_rmb(); + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + tx_buf->jumbo_frame = 0; + tx_buf->jumbo_finsh = 0; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buf++; + tx_desc++; + i++; + if (i == tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (i == tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + if (unlikely(tx_buf->jumbo_frame && !tx_buf->jumbo_finsh)) + break; + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (total_packets) { + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + + /* notify netdev of completed buffers */ + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (NE6X_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
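+			 * This pairs with the smp_mb() in __ne6x_maybe_stop_tx().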
+ */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && + !test_bit(NE6X_ADPT_DOWN, comm->state)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_q; + } + } + } + + return !!budget; +} + +static inline int ne6x_xmit_descriptor_count(struct sk_buff *skb) +{ + int count = 0; + + count = 1; + count += skb_shinfo(skb)->nr_frags; + + return count; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(NE6X_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static inline u16 ne6x_gso_get_seg_hdrlen(struct sk_buff *skb) +{ + u16 gso_hdr_len; + + gso_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (unlikely(skb->encapsulation)) + gso_hdr_len = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + + return gso_hdr_len; +} + +static int ne6x_tso(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + u8 hdrlen = 0; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) + return 0; + + hdrlen = ne6x_gso_get_seg_hdrlen(skb); + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* update gso_segs and bytecount */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * hdrlen; + + ptx_tag->tag_mss = skb_shinfo(skb)->gso_size; + + return 1; +} + +static void ne6x_tx_prepare_vlan_flags(struct ne6x_ring *tx_ring, + struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + + /* nothing left to do, software offloaded VLAN */ + if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) + return; + + /* the VLAN ethertype/tpid is determined by adapter configuration and netdev + * feature flags, which the driver only allows either 802.1Q or 802.1ad + * VLAN offloads exclusively so we only care about the VLAN ID here + */ + if (skb_vlan_tag_present(skb)) { + if (tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + ptx_tag->tag_vlan2 = cpu_to_be16(skb_vlan_tag_get(skb)); + else if (tx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_TX) + ptx_tag->tag_vlan1 = cpu_to_be16(skb_vlan_tag_get(skb)); + } +} + +static int ne6x_tx_csum(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + tx_ring->tx_stats.csum_good++; + return 1; +} + +static inline void ne6x_tx_desc_push(struct ne6x_tx_desc *tx_desc, + dma_addr_t dma, u32 size) +{ + tx_desc->buffer_mop_addr = cpu_to_le64(dma); + tx_desc->mop_cnt = size; + tx_desc->event_trigger = 1; +} + +void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, + struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +static inline void 
ne6x_fill_gso_sg(void *p, u16 offset, u16 len, struct ne6x_sg_info *sg) +{ + sg->p = p; + sg->offset = offset; + sg->len = len; +} + +int ne6x_fill_jumbo_sgl(struct ne6x_ring *tx_ring, struct sk_buff *skb) +{ + u16 sg_max_dlen = 0, dlen = 0, len = 0, offset = 0, send_dlen = 0, total_dlen = 0; + u16 subframe = 0, send_subframe = 0, sg_avail = 0, i = 0, j = 0; + u16 gso_hdr_len = ne6x_gso_get_seg_hdrlen(skb); + struct ne6x_sg_list *sgl = tx_ring->sgl; + + WARN_ON(!sgl); + + memset(sgl, 0, sizeof(struct ne6x_sg_list)); + dlen = skb_headlen(skb) - gso_hdr_len; + sgl->mss = skb_shinfo(skb)->gso_size; + sg_max_dlen = NE6X_MAX_DATA_PER_TXD - gso_hdr_len; + sg_max_dlen = ((u16)(sg_max_dlen / sgl->mss)) * sgl->mss; + total_dlen = skb->data_len + dlen; + sgl->sgl_mss_cnt = sg_max_dlen / sgl->mss; + subframe = total_dlen / sg_max_dlen; + subframe += total_dlen % sg_max_dlen ? 1 : 0; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FST_SG_FLAG | NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + offset = gso_hdr_len; + sg_avail = sg_max_dlen; + ++send_subframe; + i++; + while (dlen) { + len = dlen > sg_avail ? sg_avail : dlen; + ne6x_fill_gso_sg(skb->data, offset, len, &sgl->sg[i]); + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + + for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[j]; + + dlen = skb_frag_size(f); + offset = 0; + while (dlen) { + len = dlen > sg_avail ? sg_avail : dlen; + ne6x_fill_gso_sg(f, offset, len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FRAG_FLAG; + + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + offset = 0; + } +end: + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + sgl->sg_num = ++i; + return 0; +err: + return -1; +} + +void ne6x_fill_tx_desc(struct ne6x_tx_desc *tx_desc, u8 vp, dma_addr_t tag_dma, + dma_addr_t dma, struct ne6x_sg_info *sg) +{ + memset(tx_desc, 0, NE6X_TX_DESC_SIZE); + tx_desc->buffer_mop_addr = cpu_to_le64(dma); + tx_desc->buffer_sop_addr = (sg->flag & NE6X_SG_SOP_FLAG) ? cpu_to_le64(tag_dma) : 0; + tx_desc->mop_cnt = sg->len; + tx_desc->event_trigger = 1; + tx_desc->vp = vp; + tx_desc->sop_valid = (sg->flag & NE6X_SG_SOP_FLAG) ? 1u : 0u; + tx_desc->eop_valid = (sg->flag & NE6X_SG_EOP_FLAG) ? 1u : 0u; + tx_desc->sop_cnt = (sg->flag & NE6X_SG_SOP_FLAG) ? 
32 : 0; + if (tx_desc->eop_valid) { + tx_desc->sop_cnt = tx_desc->mop_cnt; + tx_desc->buffer_sop_addr = tx_desc->buffer_mop_addr; + tx_desc->mop_cnt = 4; + } +} + +void ne6x_fill_tx_priv_tag(struct ne6x_ring *tx_ring, struct ne6x_tx_tag *tx_tag, + int mss, struct ne6x_sg_info *sg) +{ + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + + tx_tag->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + tx_tag->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + tx_tag->tag_vport = (comm->port_info >> 8) & 0xFF; + tx_tag->tag_mss = cpu_to_be16(mss); + tx_tag->tag_num = sg->base_mss_no | (sg->flag & NE6X_SG_JUMBO_FLAG) | + (sg->flag & NE6X_SG_LST_SG_FLAG) | + (sg->flag & NE6X_SG_FST_SG_FLAG); + tx_tag->tag_num = cpu_to_be16(tx_tag->tag_num); +} + +void ne6x_xmit_jumbo(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + int j = 0; + struct ne6x_sg_list *sgl = tx_ring->sgl; + struct ne6x_sg_info *sg; + dma_addr_t dma, tag_dma; + struct sk_buff *skb = first->skb; + struct ne6x_tx_buf *tx_bi; + struct ne6x_tx_tag *tag_desc = tx_tag; + u32 i = tx_ring->next_to_use; + struct ne6x_tx_desc *tx_desc = NE6X_TX_DESC(tx_ring, i); + + for (; j < sgl->sg_num; j++) { + sg = &sgl->sg[j]; + if (likely(sg->flag & NE6X_SG_FRAG_FLAG)) { + dma = skb_frag_dma_map(tx_ring->dev, sg->p, sg->offset, sg->len, + DMA_TO_DEVICE); + } else { + dma = dma_map_single(tx_ring->dev, sg->p + sg->offset, sg->len, + DMA_TO_DEVICE); + } + + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + tx_bi = &tx_ring->tx_buf[i]; + + dma_unmap_len_set(tx_bi, len, sg->len); + + dma_unmap_addr_set(tx_bi, dma, dma); + + if (sg->flag & NE6X_SG_SOP_FLAG) { + tag_dma = tag_ring->dma + tag_ring->next_to_use * NE6X_TX_PRIV_TAG_SIZE; + tag_desc = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + ne6x_fill_tx_priv_tag(tx_ring, tag_desc, sgl->mss, sg); + ne6x_trace(tx_map_jumbo_tag, tx_ring, tag_desc); + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + } else { + tag_dma = 0; + } + + tx_desc = NE6X_TX_DESC(tx_ring, i); + ne6x_fill_tx_desc(tx_desc, tx_ring->reg_idx, tag_dma, dma, sg); + ne6x_trace(tx_map_jumbo_desc, tx_ring, tx_desc); + if (++i == tx_ring->count) + i = 0; + } + tx_ring->next_to_use = i; + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
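+	 * Once next_to_watch is non-NULL, ne6x_clean_tx_irq() treats this packet as in flight.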
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + first->jumbo_finsh = 1u; + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +void ne6x_xmit_simple(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + struct sk_buff *skb = first->skb; + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + struct ne6x_tx_desc *tx_desc, *first_desc; + unsigned int size = skb_headlen(skb); + u32 i = tx_ring->next_to_use; + struct ne6x_tx_tag *ttx_desc; + struct ne6x_tx_buf *tx_bi; + bool is_first = true; + int send_len = 0; + skb_frag_t *frag; + dma_addr_t dma; + __le64 mss = 0; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + first_desc = NE6X_TX_DESC(tx_ring, i); + tx_desc = NE6X_TX_DESC(tx_ring, i); + mss = tx_desc->mss; + tx_desc->sop_valid = 1; + tx_desc->eop_valid = 0; + tx_bi = first; + + ttx_desc = (struct ne6x_tx_tag *)tx_tag; + ttx_desc->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + ttx_desc->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + ttx_desc->tag_vport = (comm->port_info >> 8) & 0xFF; + ttx_desc->tag_mss = tx_tag->tag_mss; + ttx_desc->tag_num = 0x0; + send_len += size; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + ne6x_tx_desc_push(tx_desc, dma, size); + tx_desc->vp = tx_ring->reg_idx; + tx_desc->tso = 0x0; + tx_desc->l3_csum = 0x00; + tx_desc->l3_ofst = 0x00; + tx_desc->l4_csum = 0x00; + tx_desc->l4_ofst = 0x00; + tx_desc->pld_ofst = 0x00; + tx_desc->u.val = 0x0; + tx_desc->rsv4 = 0; + if (is_first) { + tx_desc->sop_valid = 1u; + is_first = false; + tx_desc->sop_cnt = 32; + tx_desc->buffer_sop_addr = cpu_to_le64(first->tag_dma); + } + + if (send_len == skb->len) { + tx_desc->eop_valid = 1u; + break; + } + ne6x_trace(tx_map_desc, tx_ring, tx_desc); + if (++i == tx_ring->count) + i = 0; + + tx_desc = NE6X_TX_DESC(tx_ring, i); + + size = skb_frag_size(frag); + send_len += size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_buf[i]; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
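+	 * Unlike the jumbo path, netdev_tx_sent_queue() has already been called for this packet above.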
+ */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame) +{ + struct ne6x_tx_tag *tx_tagx = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + struct ne6x_tx_buf *first; + int tso, count; + + /* prefetch the data, we'll need it later */ + prefetch(tx_tagx); + prefetch(skb->data); + + ne6x_trace(xmit_frame_ring, skb, tx_ring); + + if (!jumbo_frame) { + count = ne6x_xmit_descriptor_count(skb); + } else { + if (ne6x_fill_jumbo_sgl(tx_ring, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + count = tx_ring->sgl->sg_num; + } + /* reserve 5 descriptors to avoid tail over-write */ + if (ne6x_maybe_stop_tx(tx_ring, count + 4 + 1)) { + /* this is a hard error */ + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buf[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* record initial flags and protocol */ + + first->jumbo_frame = 0; + first->jumbo_finsh = 0; + first->tag_dma = tag_ring->dma + tag_ring->next_to_use * sizeof(struct ne6x_tx_tag); + memset(tx_tagx, 0x00, sizeof(*tx_tagx)); + + ne6x_tx_prepare_vlan_flags(tx_ring, first, tx_tagx); + + tso = ne6x_tso(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tso = ne6x_tx_csum(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tx_tagx->tag_mss = cpu_to_be16(tx_tagx->tag_mss); + + if (!jumbo_frame) { + ne6x_xmit_simple(tx_ring, first, tag_ring, tx_tagx); + } else { + first->jumbo_frame = true; + ne6x_xmit_jumbo(tx_ring, first, tag_ring, tx_tagx); + } + + return NETDEV_TX_OK; + +out_drop: + ne6x_trace(xmit_frame_ring_drop, first->skb, tx_ring); + ne6x_unmap_and_free_tx_resource(tx_ring, first); + + return NETDEV_TX_OK; +} diff --git a/drivers/net/ethernet/bzwx/nce/comm/txrx.h b/drivers/net/ethernet/bzwx/nce/comm/txrx.h new file mode 100644 index 000000000000..8b35bc385aa5 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.h @@ -0,0 +1,476 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _TXRX_H +#define _TXRX_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define NE6X_MAX_NUM_DESCRIPTORS (16 * 1024) +#define NE6X_DEFAULT_NUM_DESCRIPTORS (4 * 1024) +#define NE6X_MIN_NUM_DESCRIPTORS 64 +#define NE6X_REQ_DESCRIPTOR_MULTIPLE 32 + +#define NE6X_MAX_BUFFER_TXD 8 +#define NE6X_MIN_TX_LEN 60 + +#define NE6X_TAIL_REG_NUM 4 +#define NE6X_RX_BUFFER_WRITE 32 /* Must be power of 2 */ + +/* The size limit for a transmit buffer in a descriptor is 15K. 
+ * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define NE6X_MAX_READ_REQ_SIZE 4096 +#define NE6X_MAX_DATA_PER_TXD (15500 - 32 - 4 - 1) +#define NE6X_MAX_DATA_PER_TXD_ALIGNED \ + (NE6X_MAX_DATA_PER_TXD & ~(NE6X_MAX_READ_REQ_SIZE - 1)) + +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define NE6X_PACKET_HDR_PAD ETH_HLEN +#define NE6X_RXBUFFER_256 256 +#define NE6X_RXBUFFER_2048 2048 +#define NE6X_RXBUFFER_4096 4096 /* Used for large frames w/ padding */ +/*CIU buffer max len is 15k*/ +#define NE6X_MAX_RXBUFFER 15360 /* largest size for single descriptor */ +#define NE6X_MIN_MTU_SIZE 128 +#define NE6X_RX_HDR_SIZE NE6X_RXBUFFER_256 + +#define NE6X_TX_PRIV_TAG_SIZE 32 +#define NE6X_TX_DESC_SIZE 32 +/* iterator for handling rings in ring container */ +#define ne6x_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +#define NE6X_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define NE6X_RX_DESC(R, i) (&(((union ne6x_rx_desc *)((R)->desc))[i])) +#define NE6X_TX_DESC(R, i) (&(((struct ne6x_tx_desc *)((R)->desc))[i])) +#define NE6X_TX_TAG(R, i) (&(((struct ne6x_tx_tag *)((R)->desc))[i])) +#define NE6X_CQ_DESC(R, i) (&(((struct ne6x_cq_desc *)((R)->desc))[i])) + +#define NE6X_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? \ + 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) + +struct ne6x_tx_desc_status { + /* pkt drop */ + u8 tx_drop_addr : 1; + u8 rsv3 : 1; + u8 rsv2 : 1; + /* normal */ + u8 tx_done : 1; + /* ecc error */ + u8 tx_ecc_err : 1; + u8 rsv1 : 1; + u8 rsv0 : 1; + /* pcie error */ + u8 tx_pcie_read_err : 1; +}; + +struct ne6x_tx_desc { + union { + /* Hardware write back*/ + struct ne6x_tx_desc_status flags; + u8 val; + } u; + + u8 rsv0 : 1; + u8 vp : 7; + u8 event_trigger : 1; + u8 chain : 1; + u8 transmit_type : 2; + u8 sop_valid : 1; + u8 eop_valid : 1; + u8 tso : 1; + u8 rsv1 : 1; + u8 rsv2; + u8 rsv3; + + u8 l3_csum : 1; + u8 l3_ofst : 7; + u8 l4_csum : 1; + u8 l4_ofst : 7; + u8 pld_ofst; + + __le64 mop_cnt : 24; + __le64 sop_cnt : 16; + __le64 rsv4 : 8; + __le64 mss : 16; + __le64 buffer_mop_addr; + __le64 buffer_sop_addr; +}; + +struct ne6x_tx_tag { + u8 resv0; + u8 tag_pi1 : 1; + u8 resv1 : 7; + u8 l3_csum : 1; + u8 l4_csum : 1; + u8 vxl_l3_csum : 1; + u8 vxl_l4_csum : 1; + u8 tag_resv : 3; + u8 tag_pi0 : 1; + u8 tag_vport; + u16 tag_vlan1; /* 1q vlan */ + u16 tag_vlan2; /* 1ad vlan */ + + __le64 resv2 : 32; + __le64 tag_num : 16; + __le64 tag_mss : 16; /* mss */ + + u8 l3_ofst; + u8 l4_ofst; + u16 l4_len; /* l4hdr + pld_size */ + u8 vxl_l3_ofst; + u8 vxl_l4_ofst; + u16 vxl_l4_len; /* l4hdr + pld_size */ + + __le64 resv3; +}; + +struct ne6x_tx_buf { + struct ne6x_tx_desc *next_to_watch; + struct sk_buff *skb; + u32 bytecount; + u8 jumbo_frame; /* fragment when bytecount > 15.5KB*/ + u8 jumbo_finsh; /* when last frame of jumbo packet transmitted, set it 1 */ + u16 rsv; + int napi_budget; /* when bytecount > 15.5KB, accumulating NPAI trigger count + * in transmit irq handler + */ + u16 gso_segs; + dma_addr_t tag_dma; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct ne6x_rx_desc_status { + u8 rx_mem_err : 1; /* MOP_MEM_ADDR/SOP_MEM_ADDR/MOP_MEM_LEN=0, pkt need drop */ + u8 rx_mem_ovflow : 1; /* SOP_MEM_OVFLOW ==1, mop have pkt */ + u8 rsv : 1; + u8 rx_eop : 1; /* EOP flag */ + u8 rx_csum_err : 1; /* checksum error */ + u8 rx_err : 1; /* Not enough descriptors */ + u8 rx_mem_used : 1; /* 
MEM_USED, Normal */ + u8 pd_type : 1; /* 0 ingress pd, 1 egress pd */ +}; + +#define NE6X_RX_DESC_STATUS_EOF_SHIFT 3 +#define NE6X_RX_DESC_STATUS_ERR_SHIFT 0 + +/* Receive Descriptor */ +union ne6x_rx_desc { + struct { + u8 rsv3; + u8 rsv2 : 1; + u8 vp : 7; + __le16 mop_mem_len; + __le16 sop_mem_len; + __le16 rsv1; + __le64 buffer_sop_addr; + __le64 buffer_mop_addr; + + __le64 rsv0; + } w; /* write */ + + struct { + union { + struct ne6x_rx_desc_status flags; + u8 val; + } u; + u8 rsv2 : 1; + u8 vp : 7; + u8 pd[24]; + __le16 rsv0; + __le16 rsv1; + __le16 pkt_len; + } wb; /* Writeback */ +}; + +struct ne6x_tx_cq_desc { + u8 cq_tx_stats; + u16 cq_tx_offset; +} __packed; + +struct ne6x_rx_cq_desc { + u8 cq_rx_stats; + u16 cq_rx_len; + u16 cq_rx_offset; +} __packed; + +struct ne6x_cq_desc { + u8 ctype : 1; + u8 rsv0 : 3; + u8 num : 4; + u8 rsv1; + + union { + struct ne6x_tx_cq_desc tx_cq[10]; + struct ne6x_rx_cq_desc rx_cq[6]; + u8 data[30]; + } payload; +}; + +struct ne6x_rx_buf { + dma_addr_t dma; + struct page *page; + u32 page_offset; + u16 pagecnt_bias; +}; + +struct ne6x_q_stats { + u64 packets; + u64 bytes; +}; + +struct ne6x_txq_stats { + u64 restart_q; + u64 tx_busy; + u64 tx_linearize; + u64 csum_err; + u64 csum_good; + u64 tx_pcie_read_err; + u64 tx_ecc_err; + u64 tx_drop_addr; +}; + +struct ne6x_rxq_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buf_failed; + u64 page_reuse_count; + u64 csum_err; + u64 csum_good; + u64 rx_mem_error; + u64 rx_err; +}; + +struct ne6x_cq_stats { + u64 cq_num; + u64 tx_num; + u64 rx_num; +}; + +#define NE6X_SG_SOP_FLAG BIT(0) +#define NE6X_SG_EOP_FLAG BIT(1) +#define NE6X_SG_FST_SG_FLAG BIT(13) +#define NE6X_SG_LST_SG_FLAG BIT(14) +#define NE6X_SG_JUMBO_FLAG BIT(15) +#define NE6X_SG_FRAG_FLAG BIT(4) +#define NE6X_MAX_DESC_NUM_PER_SKB 16 + +struct ne6x_sg_info { + void *p; + u16 offset; + u16 len; + u16 flag; + u16 base_mss_no; +}; + +struct ne6x_sg_list { + u16 sg_num; + u16 mss; + u16 sgl_mss_cnt; + struct ne6x_sg_info sg[NE6X_MAX_DESC_NUM_PER_SKB]; +}; + +/* descriptor ring, associated with a adapter */ +struct ne6x_ring { + /* CL1 - 1st cacheline starts here */ + void *adpt; + struct ne6x_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + struct ne6x_q_vector *q_vector; /* Backreference to associated vector */ + + u64 __iomem *tail; + + struct ne6x_sg_list *sgl; + + union { + struct ne6x_tx_buf *tx_buf; + struct ne6x_rx_buf *rx_buf; + }; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + u16 cq_last_expect; + + u16 queue_index; /* Queue number of ring */ + u16 rx_buf_len; + + /* stats structs */ + struct ne6x_q_stats stats; + struct u64_stats_sync syncp; + + union { + struct ne6x_txq_stats tx_stats; + struct ne6x_rxq_stats rx_stats; + struct ne6x_cq_stats cq_stats; + }; + + struct rcu_head rcu; /* to avoid race on free */ + dma_addr_t dma; /* physical address of ring */ + unsigned int size; /* length of descriptor ring in bytes */ + struct sk_buff *skb; /* When ne6x_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * ne6x_clean_rx_ring_irq() is called + * for this ring. 
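+				 * Only one such partially received packet is held per ring at a time.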
+ */ +} ____cacheline_internodealigned_in_smp; + +struct ne6x_ring_container { + /* head of linked-list of rings */ + struct ne6x_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; +}; + +union rx_ol_flags { + u32 ol_flags; /* Offload Feature Bits. */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 rx_tnl_csum :1; + u32 rsv0 :1; + u32 tag_num :8; + u32 rsv1 :6; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u32 rsv1 :6; + u32 tag_num :8; + u32 rsv0 :1; + u32 rx_tnl_csum :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; +#endif + } flag_bits; +}; + +struct rx_hdr_info { + union rx_ol_flags ol_flag; + u32 rss_hash; /* RSS Hash Value */ + u32 vlan_tci_outer:16; /* VLAN Outer Tag Control Identifier */ + u32 vlan_tci:16; /* VLAN Tag Control Identifier */ +}; + +#define NE6X_INT_NAME_STR_LEN (IFNAMSIZ + 16) + +/* struct that defines an interrupt vector */ +struct ne6x_q_vector { + void *adpt; + + u16 v_idx; /* index in the adpt->q_vector array. 
*/ + u16 reg_idx; + + struct napi_struct napi; + + struct ne6x_ring_container rx; + struct ne6x_ring_container tx; + struct ne6x_ring_container cq; + struct ne6x_ring_container tg; + + u8 num_ringpairs; /* total number of ring pairs in vector */ + + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + + char name[NE6X_INT_NAME_STR_LEN]; +} ____cacheline_internodealigned_in_smp; + +#define DESC_NEEDED (MAX_SKB_FRAGS + 6) + +static inline unsigned int ne6x_rx_pg_order(struct ne6x_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define ne6x_rx_pg_size(_ring) (PAGE_SIZE << ne6x_rx_pg_order(_ring)) + +static inline struct netdev_queue *txring_txq(const struct ne6x_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget); +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget); +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget); +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame); +void ne6x_tail_update(struct ne6x_ring *ring, int val); +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring); +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring); +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring); +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring); +int ne6x_setup_tx_sgl(struct ne6x_ring *tx_ring); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/version.h b/drivers/net/ethernet/bzwx/nce/comm/version.h new file mode 100644 index 000000000000..9affdb9803b1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _VERSION_H +#define _VERSION_H + +#define VERSION "1.0.4" + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h new file mode 100644 index 000000000000..1206d8ab3cfd --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h @@ -0,0 +1,468 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_H +#define _NE6X_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "feature.h" +#include "txrx.h" +#include "common.h" +#include "ne6x_txrx.h" +#include "ne6x_ethtool.h" +#include "ne6x_procfs.h" +#include "ne6x_virtchnl_pf.h" +#include "version.h" + +#define NE6X_MAX_VP_NUM 64 +#define NE6X_PF_VP0_NUM 64 +#define NE6X_PF_VP1_NUM 65 +#define NE6X_MAILBOX_VP_NUM NE6X_PF_VP0_NUM +#define NE6X_MAX_MSIX_NUM 72 +#define NE6X_MIN_MSIX 2 + +#define NE6X_NIC_INT_VP 71 +#define NE6X_NIC_INT_START_BIT 42 + +#define wr64(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr0) + (reg))) +#define rd64(a, reg) \ + readq((void __iomem *)((a)->hw_addr0) + (reg)) +#define wr64_bar4(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr4) + (reg))) +#define rd64_bar4(a, reg) \ + readq((void __iomem *)((a)->hw_addr4) + (reg)) + +#define ne6x_pf_to_dev(pf) (&((pf)->pdev->dev)) +#define ne6x_get_vf_by_id(pf, vf_id) (&((pf)->vf[vf_id])) + +#define ADPT_PPORT(adpt) ((adpt)->port_info->hw_port_id) +#define ADPT_LPORT(adpt) ((adpt)->port_info->lport) +#define ADPT_VPORT(adpt) ((adpt)->vport) +#define ADPT_VPORTCOS(adpt) ((adpt)->base_queue + 160) + +enum ne6x_adapter_type { + NE6X_ADPT_PF = 0, + NE6X_ADPT_VF, +}; + +enum ne6x_adapter_flags { + NE6X_ADPT_F_DISABLE_FW_LLDP, + NE6X_ADPT_F_LINKDOWN_ON_CLOSE, + NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, + NE6X_ADPT_F_DDOS_SWITCH, + NE6X_ADPT_F_ACL, + NE6X_ADPT_F_TRUST_VLAN, + NE6X_ADPT_F_NBITS /* must be last */ +}; + +enum ne6x_pf_state { + NE6X_TESTING, + NE6X_DOWN, + NE6X_SERVICE_SCHED, + NE6X_INT_INIT_DOWN, + NE6X_CLIENT_SERVICE_REQUESTED, + NE6X_LINK_POOLING, + NE6X_CONFIG_BUSY, + NE6X_TIMEOUT_RECOVERY_PENDING, + NE6X_PF_RESET_REQUESTED, + NE6X_CORE_RESET_REQUESTED, + NE6X_GLOBAL_RESET_REQUESTED, + NE6X_RESET_INTR_RECEIVED, + NE6X_DOWN_REQUESTED, + NE6X_VF_DIS, + NE6X_MAILBOXQ_EVENT_PENDING, + NE6X_PF_INTX, + NE6X_PF_MSI, + NE6X_PF_MSIX, + NE6X_FLAG_SRIOV_ENA, + NE6X_REMOVE, + NE6X_STATE_NBITS /* must be last */ +}; + +enum { + NE6X_ETHTOOL_FLASH_810_LOADER = 0, + NE6X_ETHTOOL_FLASH_810_APP = 1, + NE6X_ETHTOOL_FLASH_807_APP = 2, + NE6X_ETHTOOL_FLASH_NP = 3, + NE6X_ETHTOOL_FLASH_PXE = 4, + NE6X_ETHTOOL_FRU = 0xf2, +}; + +/* MAC addr list head node struct */ +struct mac_addr_head { + struct list_head list; + struct mutex mutex; /* mutex */ +}; + +/* MAC addr list node struct */ +struct mac_addr_node { + struct list_head list; + u8 addr[32]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_FW_VER_NORMAL = 0x0, + NE6X_FW_VER_WHITELIST = 0x100, +}; + +struct ne6x_lump_tracking { + u16 num_entries; + u16 list[]; +}; + +struct ne6x_hw_port_stats { + u64 mac_rx_eth_byte; + u64 mac_rx_eth; + u64 mac_rx_eth_undersize; + u64 mac_rx_eth_crc; + u64 mac_rx_eth_64b; + u64 mac_rx_eth_65_127b; + u64 mac_rx_eth_128_255b; + u64 mac_rx_eth_256_511b; + u64 mac_rx_eth_512_1023b; + u64 mac_rx_eth_1024_15360b; + u64 mac_tx_eth_byte; + u64 mac_tx_eth; + u64 mac_tx_eth_undersize; + u64 mac_tx_eth_64b; + u64 mac_tx_eth_65_127b; + u64 mac_tx_eth_128_255b; + u64 mac_tx_eth_256_511b; + u64 mac_tx_eth_512_1023b; + u64 mac_tx_eth_1024_15360b; +}; + +/* struct that defines a adapter, associated with a dev */ +struct ne6x_adapter { + struct ne6x_adapt_comm comm; + struct net_device *netdev; + struct 
ne6x_pf *back; /* back pointer to PF */ + struct ne6x_port_info *port_info; /* back pointer to port_info */ + struct ne6x_ring **rx_rings; /* Rx ring array */ + struct ne6x_ring **tx_rings; /* Tx ring array */ + struct ne6x_ring **cq_rings; /* Tx ring array */ + struct ne6x_ring **tg_rings; /* Tx tag ring array */ + struct ne6x_q_vector **q_vectors; /* q_vector array */ + + /* used for loopback test */ + char *send_buffer; + wait_queue_head_t recv_notify; + u8 recv_done; + + irqreturn_t (*irq_handler)(int irq, void *data); + + u32 tx_restart; + u32 tx_busy; + u32 rx_buf_failed; + u32 rx_page_failed; + u16 num_q_vectors; + u16 base_vector; /* IRQ base for OS reserved vectors */ + enum ne6x_adapter_type type; + struct ne6x_vf *vf; /* VF associated with this adapter */ + u16 idx; /* software index in pf->adpt[] */ + u16 max_frame; + u16 rx_buf_len; + struct rtnl_link_stats64 net_stats; + struct rtnl_link_stats64 net_stats_offsets; + struct ne6x_eth_stats eth_stats; + struct ne6x_eth_stats eth_stats_offsets; + struct ne6x_rss_info rss_info; + int rss_size; + + bool irqs_ready; + bool current_isup; /* Sync 'link up' logging */ + u16 current_speed; + u16 vport; + u16 num_queue; /* Used queues */ + u16 base_queue; /* adapter's first queue in hw array */ + u16 num_tx_desc; + u16 num_rx_desc; + u16 num_cq_desc; + u16 num_tg_desc; + + u32 hw_feature; + bool netdev_registered; + + /* unicast MAC head node */ + struct mac_addr_head uc_mac_addr; + /* multicast MAC head node */ + struct mac_addr_head mc_mac_addr; + + struct work_struct set_rx_mode_task; + + struct ne6x_hw_port_stats stats; + DECLARE_BITMAP(flags, NE6X_ADPT_F_NBITS); + + struct list_head vlan_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + + /* aRFS members only allocated for the PF ADPT */ +#define NE6X_MAX_RFS_FILTERS 0xFFFF +#define NE6X_MAX_ARFS_LIST 1024 +#define NE6X_ARFS_LST_MASK (NE6X_MAX_ARFS_LIST - 1) + struct hlist_head *arfs_fltr_list; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ + atomic_t *arfs_last_fltr_id; +} ____cacheline_internodealigned_in_smp; + +struct ne6x_dev_eeprom_info { + u8 vendor_id[3]; + u8 ocp_record_version; + u8 max_power_s0; + u8 max_power_s5; + u8 hot_card_cooling_passive_tier; + u8 cold_card_cooling_passive_tier; + u8 cooling_mode; + u16 hot_standby_airflow_require; + u16 cold_standby_airflow_require; + u8 uart_configuration_1; + u8 uart_configuration_2; + u8 usb_present; + u8 manageability_type; + u8 fru_write_protection; + u8 prog_mode_power_state_supported; + u8 hot_card_cooling_active_tier; + u8 cold_card_cooling_active_tier; + u8 transceiver_ref_power_Level; + u8 transceiver_ref_temp_Level; + u8 card_thermal_tier_with_local_fan_fail; + u16 product_mode; + u8 is_pcie_exist; + u32 logic_port_to_phyical; + u8 resv[3]; + u8 number_of_physical_controllers; + u8 control_1_udid[16]; + u8 control_2_udid[16]; + u8 control_3_udid[16]; + u8 control_4_udid[16]; + u32 hw_feature; + u32 hw_flag; + u8 port_0_mac[6]; + u8 port_1_mac[6]; + u8 port_2_mac[6]; + u8 port_3_mac[6]; + u8 rsv[9]; + u32 spd_verify_value; +} __packed; + +struct ne6x_hw { + u64 __iomem *hw_addr0; + u64 __iomem *hw_addr2; + u64 __iomem *hw_addr4; + + struct ne6x_port_info *port_info; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 dvm_ena; /* double vlan enable */ + struct ne6x_pf *back; + 
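+	/* bus information for this device */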
struct ne6x_bus_info bus; + u16 pf_port; + + u32 expect_vp; + u32 max_queue; + + struct ne6x_mbx_snapshot mbx_snapshot; + u8 ne6x_mbx_ready_to_send[64]; +}; + +#define ne6x_hw_to_dev(ptr) (&(container_of((ptr), struct ne6x_pf, hw))->pdev->dev) + +struct ne6x_firmware_ver_info { + u32 firmware_soc_ver; + u32 firmware_np_ver; + u32 firmware_pxe_ver; +}; + +/* struct that defines the Ethernet device */ +struct ne6x_pf { + struct pci_dev *pdev; + + /* OS reserved IRQ details */ + struct msix_entry *msix_entries; + u16 ctrl_adpt_idx; /* control adapter index in pf->adpt array */ + + struct ne6x_adapter **adpt; /* adapters created by the driver */ + + struct mutex switch_mutex; /* switch_mutex */ + struct mutex mbus_comm_mutex; /* mbus_comm_mutex */ + struct timer_list serv_tmr; + struct timer_list linkscan_tmr; + unsigned long service_timer_period; + struct work_struct serv_task; + struct work_struct linkscan_work; + + /* Virtchnl/SR-IOV config info */ + struct ne6x_vf *vf; + u16 num_alloc_vfs; + u16 num_qps_per_vf; + + u16 next_adpt; /* Next free slot in pf->adpt[] - 0-based! */ + u16 num_alloc_adpt; + + DECLARE_BITMAP(state, NE6X_STATE_NBITS); + + u32 tx_timeout_count; + u32 tx_timeout_recovery_level; + unsigned long tx_timeout_last_recovery; + struct ne6x_firmware_ver_info verinfo; + struct ne6x_dev_eeprom_info sdk_spd_info; + + struct ne6x_hw hw; + struct ne6x_lump_tracking *irq_pile; +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6x_dbg_pf; + struct dentry *ne6x_dbg_info_pf; +#endif /* CONFIG_DEBUG_FS */ + struct proc_dir_entry *ne6x_proc_pf; + struct list_head key_filter_list; + spinlock_t key_list_lock; /* Lock to protect accesses to key filter */ + + char link_intname[NE6X_INT_NAME_STR_LEN]; + char mailbox_intname[NE6X_INT_NAME_STR_LEN]; + bool link_int_irq_ready; + bool mailbox_int_irq_ready; + bool is_fastmode; + u32 hw_flag; + u32 dump_info; + u16 dev_type; +}; + +static inline void ne6x_adpt_setup_irqhandler(struct ne6x_adapter *adpt, + irqreturn_t (*irq_handler)(int, void *)) +{ + adpt->irq_handler = irq_handler; +} + +struct ne6x_netdev_priv { + struct ne6x_adapter *adpt; +}; + +static inline bool ne6x_is_supported_port_vlan_proto(struct ne6x_hw *hw, + u16 vlan_proto) +{ + bool is_supported = false; + + switch (vlan_proto) { + case ETH_P_8021Q: + is_supported = true; + break; + case ETH_P_8021AD: + if (hw->dvm_ena) + is_supported = true; + break; + } + + return is_supported; +} + +static inline struct ne6x_pf *ne6x_netdev_to_pf(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt->back; +} + +static inline struct ne6x_adapter *ne6x_netdev_to_adpt(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt; +} + +#define NE6X_VLAN(tpid, vid, prio) \ + ((struct ne6x_vlan){ tpid, vid, prio }) + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt); + +void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value); +u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr); +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable); +void ne6x_update_pf_stats(struct ne6x_adapter *adpt); +void ne6x_service_event_schedule(struct ne6x_pf *pf); + +void ne6x_down(struct ne6x_adapter *adpt); +int ne6x_up(struct ne6x_adapter *adpt); +int ne6x_adpt_configure(struct ne6x_adapter *adpt); +void ne6x_adpt_close(struct ne6x_adapter *adpt); + +int ne6x_alloc_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_tx(struct 
ne6x_adapter *adpt); +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt); + +int ne6x_close(struct net_device *netdev); +int ne6x_open(struct net_device *netdev); +int ne6x_adpt_open(struct ne6x_adapter *adpt); +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt); +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt); +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors); +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt); +bool netif_is_ne6x(struct net_device *dev); + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int min_tx_rate, int max_tx_rate); + +int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); + +int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +void ne6x_sync_features(struct net_device *netdev); + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); +int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf); +void ne6x_linkscan_schedule(struct ne6x_pf *pf); + +ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c new file mode 100644 index 000000000000..fe2fd42ffdda --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c @@ -0,0 +1,631 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add); + +int ne6x_dev_add_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + u32 table_id = 0xffffffff; + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + dev_info(dev, "add: vport: %d %x %x %x %x %d %d rxq_id:%d\n", adpt->vport, + input->ip.v4.dst_ip, input->ip.v4.src_ip, input->ip.v4.dst_port, + input->ip.v4.src_port, input->ip.v4.pi, input->ip.v4.proto, input->q_index); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + /* hash data */ + memcpy(&fster.data, &input->data, sizeof(fster.data)); + + /* flow steer info */ + for (index = 0; index < 24; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + + if (ret == -ENOENT) { + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster), &table_id); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "insert flow steer table fail %02x\n", + ADPT_LPORT(adpt)); + } else { + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_ARFS_TABLE, result.key_index + 8, + (u32 *)&fster.data, sizeof(fster.data)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "update flow steer table fail ret:%d\n", + ret); + } + + return 0; +} + +int ne6x_dev_del_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + dev_info(dev, "del: vport: %d %x %x %x %x %d %d rxq_id:%d\n", + adpt->vport, input->ip.v4.dst_ip, input->ip.v4.src_ip, input->ip.v4.dst_port, + input->ip.v4.src_port, input->ip.v4.pi, input->ip.v4.proto, input->q_index); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + + /* flow steer info */ + for (index = 0; index < 16; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + if (!ret) { + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_ARFS_TABLE, + (u32 *)&fster.ip, sizeof(fster.ip)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "delete flow steer table fail ret:%d\n", + ret); + } else { + dev_err(ne6x_pf_to_dev(adpt->back), "search flow steer table fail ret:%d\n", ret); + } + return 0; +} + +static bool ne6x_is_arfs_active(struct ne6x_adapter *adpt) +{ + return !!adpt->arfs_fltr_list; +} + +static bool +ne6x_arfs_is_flow_expired(struct ne6x_adapter *adpt, struct ne6x_arfs_entry *arfs_entry) +{ +#define NE6X_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000) + if (rps_may_expire_flow(adpt->netdev, arfs_entry->fltr_info.q_index, + arfs_entry->flow_id, + arfs_entry->fltr_info.fltr_id)) + return true; + + /* expiration timer only used for UDP filters */ + if (arfs_entry->fltr_info.flow_type != NE6X_FLTR_PTYPE_NONF_IPV4_UDP && + 
arfs_entry->fltr_info.flow_type != NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + return false; + + return time_in_range64(arfs_entry->time_activated + + NE6X_ARFS_TIME_DELTA_EXPIRATION, + arfs_entry->time_activated, get_jiffies_64()); +} + +static void +ne6x_arfs_update_flow_rules(struct ne6x_adapter *adpt, u16 idx, + struct hlist_head *add_list, + struct hlist_head *del_list) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + /* go through the aRFS hlist at this idx and check for needed updates */ + hlist_for_each_entry_safe(e, n, &adpt->arfs_fltr_list[idx], list_entry) { + /* check if filter needs to be added to HW */ + if (e->fltr_state == NE6X_ARFS_INACTIVE) { + enum ne6x_fltr_ptype flow_type = e->fltr_info.flow_type; + struct ne6x_arfs_entry_ptr *ep = + devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC); + + if (!ep) + continue; + INIT_HLIST_NODE(&ep->list_entry); + /* reference aRFS entry to add HW filter */ + ep->arfs_entry = e; + hlist_add_head(&ep->list_entry, add_list); + e->fltr_state = NE6X_ARFS_ACTIVE; + /* expiration timer only used for UDP flows */ + if (flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + e->time_activated = get_jiffies_64(); + } else if (e->fltr_state == NE6X_ARFS_ACTIVE) { + /* check if filter needs to be removed from HW */ + if (ne6x_arfs_is_flow_expired(adpt, e)) { + /* remove aRFS entry from hash table for delete + * and to prevent referencing it the next time + * through this hlist index + */ + hlist_del(&e->list_entry); + e->fltr_state = NE6X_ARFS_TODEL; + /* save reference to aRFS entry for delete */ + hlist_add_head(&e->list_entry, del_list); + } + } + } +} + +int ne6x_arfs_add_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *add_list_head) +{ + struct ne6x_arfs_entry_ptr *ep; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) { + int result; + + result = ne6x_dev_add_fster_rules(adpt, &ep->arfs_entry->fltr_info, false); + if (!result) + ne6x_arfs_update_active_fltr_cntrs(adpt, ep->arfs_entry, true); + else + dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, ep->arfs_entry->fltr_state, + ep->arfs_entry->fltr_info.fltr_id, + ep->arfs_entry->flow_id, + ep->arfs_entry->fltr_info.q_index); + + hlist_del(&ep->list_entry); + devm_kfree(dev, ep); + } + + return 0; +} + +int ne6x_arfs_del_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *del_list_head) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(e, n, del_list_head, list_entry) { + int result; + + result = ne6x_dev_del_fster_rules(adpt, &e->fltr_info, false); + if (!result) + ne6x_arfs_update_active_fltr_cntrs(adpt, e, false); + else + dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, e->fltr_state, e->fltr_info.fltr_id, + e->flow_id, e->fltr_info.q_index); + + /* The aRFS hash table is no longer referencing this entry */ + hlist_del(&e->list_entry); + devm_kfree(dev, e); + } + + return 0; +} + +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) +{ + struct ne6x_adapter *pf_adpt; + unsigned int i; + u8 idx = 0; + + ne6x_for_each_pf(pf, idx) { + HLIST_HEAD(tmp_del_list); + HLIST_HEAD(tmp_add_list); + + pf_adpt = pf->adpt[idx]; + + if (!pf_adpt) + continue; + + if (unlikely(!(pf_adpt->netdev->features & 
NETIF_F_NTUPLE))) + continue; + + if (!ne6x_is_arfs_active(pf_adpt)) + continue; + + spin_lock_bh(&pf_adpt->arfs_lock); + /* Once we process aRFS for the PF ADPT get out */ + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + ne6x_arfs_update_flow_rules(pf_adpt, i, &tmp_add_list, + &tmp_del_list); + spin_unlock_bh(&pf_adpt->arfs_lock); + + /* use list of ne6x_arfs_entry(s) for delete */ + ne6x_arfs_del_flow_rules(pf_adpt, &tmp_del_list); + + /* use list of ne6x_arfs_entry(s) for add */ + ne6x_arfs_add_flow_rules(pf_adpt, &tmp_add_list); + } +} + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add) +{ + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = adpt->arfs_fltr_cntrs; + + switch (entry->fltr_info.flow_type) { + case NE6X_FLTR_PTYPE_NONF_IPV4_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv6_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV4_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv6_cnt); + break; + default: + dev_err(ne6x_pf_to_dev(adpt->back), "aRFS: Failed to update filter counters, invalid filter type %d\n", + entry->fltr_info.flow_type); + } +} + +static bool +ne6x_arfs_cmp(struct ne6x_fster_fltr *fltr_info, const struct flow_keys *fk) +{ + bool is_v4; + + if (!fltr_info || !fk) + return false; + + is_v4 = (fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_TCP); + + if (fk->basic.n_proto == htons(ETH_P_IP) && is_v4) + return (fltr_info->ip.v4.proto == fk->basic.ip_proto && + fltr_info->ip.v4.src_port == fk->ports.src && + fltr_info->ip.v4.dst_port == fk->ports.dst && + fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src && + fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst); + + else if (fk->basic.n_proto == htons(ETH_P_IPV6) && !is_v4) + return (fltr_info->ip.v6.proto == fk->basic.ip_proto && + fltr_info->ip.v6.src_port == fk->ports.src && + fltr_info->ip.v6.dst_port == fk->ports.dst && + !memcmp(&fltr_info->ip.v6.src_ip, + &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&fltr_info->ip.v6.dst_ip, + &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr))); + + return false; +} + +static struct ne6x_arfs_entry * +ne6x_arfs_build_entry(struct ne6x_adapter *adpt, const struct flow_keys *fk, + u32 hash, u16 rxq_idx, u32 flow_id) +{ + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_fster_fltr *fltr_info; + u8 ip_proto; + + arfs_entry = devm_kzalloc(ne6x_pf_to_dev(adpt->back), + sizeof(*arfs_entry), + GFP_ATOMIC | __GFP_NOWARN); + if (!arfs_entry) + return NULL; + + fltr_info = &arfs_entry->fltr_info; + fltr_info->q_index = rxq_idx; + fltr_info->dest_adpt = adpt->idx; + ip_proto = fk->basic.ip_proto; + + if (fk->basic.n_proto == htons(ETH_P_IP)) { + fltr_info->ip.v4.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? 
+ NE6X_FLTR_PTYPE_NONF_IPV4_TCP : + NE6X_FLTR_PTYPE_NONF_IPV4_UDP; + fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src; + fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst; + fltr_info->ip.v4.src_port = fk->ports.src; + fltr_info->ip.v4.dst_port = fk->ports.dst; + fltr_info->ip.v4.proto = fk->basic.ip_proto; + fltr_info->ip.v4.pi = ADPT_LPORT(adpt); + } else { /* ETH_P_IPV6 */ + fltr_info->ip.v6.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? + NE6X_FLTR_PTYPE_NONF_IPV6_TCP : + NE6X_FLTR_PTYPE_NONF_IPV6_UDP; + memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); + fltr_info->ip.v6.src_port = fk->ports.src; + fltr_info->ip.v6.dst_port = fk->ports.dst; + fltr_info->ip.v6.proto = fk->basic.ip_proto; + fltr_info->ip.v6.pi = ADPT_LPORT(adpt); + } + fltr_info->data.tab_id = 5; + fltr_info->data.port = ADPT_VPORT(adpt); + fltr_info->data.cos = cpu_to_be16(rxq_idx); + fltr_info->data.hash = hash; + + arfs_entry->flow_id = flow_id; + fltr_info->fltr_id = + atomic_inc_return(adpt->arfs_last_fltr_id) % RPS_NO_FILTER; + + return arfs_entry; +} + +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + + if (!adpt) + return; + + netdev = adpt->netdev; + if (!netdev || !netdev->rx_cpu_rmap) + return; + + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +} + +int ne6x_get_irq_num(struct ne6x_pf *pf, int idx) +{ + if (!pf->msix_entries) + return -EINVAL; + + return pf->msix_entries[idx].vector; +} + +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + struct ne6x_pf *pf; + int base_idx, i; + + pf = adpt->back; + + netdev = adpt->netdev; + if (!pf || !netdev || !adpt->num_q_vectors) + return -EINVAL; + + netdev_dbg(netdev, "Setup CPU RMAP: adpt type 0x%x, ifname %s, q_vectors %d\n", + adpt->type, netdev->name, adpt->num_q_vectors); + + netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adpt->num_q_vectors); + if (unlikely(!netdev->rx_cpu_rmap)) + return -EINVAL; + + base_idx = adpt->base_vector; + for (i = 0; i < adpt->num_q_vectors; i++) { + if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, ne6x_get_irq_num(pf, base_idx + i))) { + ne6x_free_cpu_rx_rmap(adpt); + return -EINVAL; + } + } + + return 0; +} + +int ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_adapter *adpt = np->adpt; + struct flow_keys fk; + struct ne6x_pf *pf; + __be16 n_proto; + u8 ip_proto; + u16 idx; + u32 hash; + int ret; + + if (unlikely(!(netdev->features & NETIF_F_NTUPLE))) + return -ENODEV; + + /* failed to allocate memory for aRFS so don't crash */ + if (unlikely(!adpt->arfs_fltr_list)) + return -ENODEV; + + pf = adpt->back; + + /* aRFS only supported on Rx queues belonging to PF ADPT */ + if (rxq_idx >= adpt->num_queue) + return -EOPNOTSUPP; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; + + n_proto = fk.basic.n_proto; + /* Support only IPV4 and IPV6 */ + if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) || + n_proto == htons(ETH_P_IPV6)) + ip_proto = fk.basic.ip_proto; + else + return -EPROTONOSUPPORT; + + /* Support only TCP and UDP */ + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) + return -EPROTONOSUPPORT; + + /* choose the aRFS list bucket based on skb 
hash */ + hash = skb_get_hash_raw(skb); + idx = skb_get_hash_raw(skb) & NE6X_ARFS_LST_MASK; + /* search for entry in the bucket */ + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry(arfs_entry, &adpt->arfs_fltr_list[idx], + list_entry) { + struct ne6x_fster_fltr *fltr_info = &arfs_entry->fltr_info; + + /* keep searching for the already existing arfs_entry flow */ + if (!ne6x_arfs_cmp(fltr_info, &fk)) + continue; + + ret = fltr_info->fltr_id; + + if (fltr_info->q_index == rxq_idx || + arfs_entry->fltr_state != NE6X_ARFS_ACTIVE) + goto out; + + /* update the queue to forward to on an already existing flow */ + fltr_info->q_index = rxq_idx; + fltr_info->data.cos = cpu_to_be16(rxq_idx); + arfs_entry->fltr_state = NE6X_ARFS_INACTIVE; + ne6x_arfs_update_active_fltr_cntrs(adpt, arfs_entry, false); + goto out_schedule_service_task; + } + + arfs_entry = ne6x_arfs_build_entry(adpt, &fk, hash, rxq_idx, flow_id); + if (!arfs_entry) { + ret = -ENOMEM; + goto out; + } + + ret = arfs_entry->fltr_info.fltr_id; + INIT_HLIST_NODE(&arfs_entry->list_entry); + hlist_add_head(&arfs_entry->list_entry, &adpt->arfs_fltr_list[idx]); +out_schedule_service_task: + ne6x_service_event_schedule(pf); +out: + spin_unlock_bh(&adpt->arfs_lock); + return ret; +} + +static int ne6x_init_arfs_cntrs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return -EINVAL; + + adpt->arfs_fltr_cntrs = kzalloc(sizeof(*adpt->arfs_fltr_cntrs), + GFP_KERNEL); + if (!adpt->arfs_fltr_cntrs) + return -ENOMEM; + + adpt->arfs_last_fltr_id = kzalloc(sizeof(*adpt->arfs_last_fltr_id), + GFP_KERNEL); + if (!adpt->arfs_last_fltr_id) { + kfree(adpt->arfs_fltr_cntrs); + adpt->arfs_fltr_cntrs = NULL; + return -ENOMEM; + } + + return 0; +} + +void ne6x_init_arfs(struct ne6x_adapter *adpt) +{ + struct hlist_head *arfs_fltr_list; + unsigned int i; + + if (!adpt) + return; + + arfs_fltr_list = kcalloc(NE6X_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), + GFP_KERNEL); + if (!arfs_fltr_list) + return; + + if (ne6x_init_arfs_cntrs(adpt)) + goto free_arfs_fltr_list; + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + INIT_HLIST_HEAD(&arfs_fltr_list[i]); + + spin_lock_init(&adpt->arfs_lock); + + adpt->arfs_fltr_list = arfs_fltr_list; + + return; + +free_arfs_fltr_list: + kfree(arfs_fltr_list); +} + +void ne6x_clear_arfs(struct ne6x_adapter *adpt) +{ + struct device *dev; + unsigned int i; + struct ne6x_arfs_entry *r; + struct hlist_node *n; + HLIST_HEAD(tmp_del_list); + + if (!adpt || !adpt->back || !adpt->arfs_fltr_list) + return; + + dev = ne6x_pf_to_dev(adpt->back); + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + if (r->fltr_state == NE6X_ARFS_ACTIVE || r->fltr_state == NE6X_ARFS_TODEL) { + hlist_del(&r->list_entry); + hlist_add_head(&r->list_entry, &tmp_del_list); + } + } + spin_unlock_bh(&adpt->arfs_lock); + } + + hlist_for_each_entry_safe(r, n, &tmp_del_list, list_entry) { + ne6x_dev_del_fster_rules(adpt, &r->fltr_info, false); + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + struct ne6x_arfs_entry *r; + struct hlist_node *n; + + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + spin_unlock_bh(&adpt->arfs_lock); + } + + kfree(adpt->arfs_fltr_list); + adpt->arfs_fltr_list = NULL; + kfree(adpt->arfs_last_fltr_id); + adpt->arfs_last_fltr_id = NULL; + kfree(adpt->arfs_fltr_cntrs); + 
adpt->arfs_fltr_cntrs = NULL; +} + +void ne6x_remove_arfs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return; + + ne6x_clear_arfs(adpt); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h new file mode 100644 index 000000000000..a24d9f19d478 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_ARFS_H +#define _NE6X_ARFS_H + +/* protocol enumeration for filters */ +enum ne6x_fltr_ptype { + /* NONE - used for undef/error */ + NE6X_FLTR_PTYPE_NONF_NONE = 0, + NE6X_FLTR_PTYPE_NONF_IPV4_UDP, + NE6X_FLTR_PTYPE_NONF_IPV4_TCP, + NE6X_FLTR_PTYPE_NONF_IPV6_UDP, + NE6X_FLTR_PTYPE_NONF_IPV6_TCP, + NE6X_FLTR_PTYPE_MAX, +}; + +struct ne6x_fster_v4 { + __be32 rsv0[3]; + __be32 dst_ip; + __be32 rsv1[3]; + __be32 src_ip; + __be16 dst_port; + __be16 src_port; + __be16 rsv2; + u8 pi; + u8 proto; + u8 rsv3[24]; +}; + +#define NE6X_IPV6_ADDR_LEN_AS_U32 4 + +struct ne6x_fster_v6 { + __be32 dst_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be32 src_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be16 dst_port; + __be16 src_port; + __be16 rsv0; + u8 pi; + u8 proto; + u8 rsv1[24]; +}; + +struct ne6x_fster_data { + u8 tab_id; + u8 port; + __be16 cos; + __be32 hash; + u8 rsv0[24]; +}; + +struct ne6x_fster_table { + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_search_result { + u32 key_index; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_fltr { + struct list_head fltr_node; + enum ne6x_fltr_ptype flow_type; + + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; + + /* filter control */ + u16 q_index; + u16 dest_adpt; + u8 cnt_ena; + u16 cnt_index; + u32 fltr_id; +}; + +enum ne6x_arfs_fltr_state { + NE6X_ARFS_INACTIVE, + NE6X_ARFS_ACTIVE, + NE6X_ARFS_TODEL, +}; + +struct ne6x_arfs_entry { + struct ne6x_fster_fltr fltr_info; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + struct hlist_node list_entry; + u64 time_activated; /* only valid for UDP flows */ + u32 flow_id; + /* fltr_state = 0 - NE6X_ARFS_INACTIVE: + * filter needs to be updated or programmed in HW. + * fltr_state = 1 - NE6X_ARFS_ACTIVE: + * filter is active and programmed in HW. + * fltr_state = 2 - NE6X_ARFS_TODEL: + * filter has been deleted from HW and needs to be removed from + * the aRFS hash table. 
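+ *
+ * Typical lifecycle, as driven by the code above (ne6x_arfs_build_entry(),
+ * ne6x_arfs_update_flow_rules() and ne6x_sync_arfs_fltrs()): an entry is
+ * created INACTIVE, set ACTIVE once its filter has been handed to hardware,
+ * and set TODEL when an expired or cleared filter is queued for deletion
+ * and removal from the hash table.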
+ */ + u8 fltr_state; +}; + +struct ne6x_arfs_entry_ptr { + struct ne6x_arfs_entry *arfs_entry; + struct hlist_node list_entry; +}; + +struct ne6x_arfs_active_fltr_cntrs { + atomic_t active_tcpv4_cnt; + atomic_t active_tcpv6_cnt; + atomic_t active_udpv4_cnt; + atomic_t active_udpv6_cnt; +}; + +#ifdef CONFIG_RFS_ACCEL +int +ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id); +void ne6x_clear_arfs(struct ne6x_adapter *adpt); +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_init_arfs(struct ne6x_adapter *adpt); +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf); +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_remove_arfs(struct ne6x_adapter *adpt); +#else +static inline void ne6x_clear_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) { } +static inline void ne6x_init_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) { } +static inline void ne6x_remove_arfs(struct ne6x_adapter *adpt) { } + +static inline int ne6x_set_cpu_rx_rmap(struct ne6x_adapter __always_unused *adpt) +{ + return 0; +} + +static inline int +ne6x_rx_flow_steer(struct net_device __always_unused *netdev, + const struct sk_buff __always_unused *skb, + u16 __always_unused rxq_idx, u32 __always_unused flow_id) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_RFS_ACCEL */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c new file mode 100644 index 000000000000..b945381ee8e8 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c @@ -0,0 +1,2397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_debugfs.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +#define NE6X_CQ_TO_OFF_TX(__desc, __idx) \ + (((__desc)->payload.data[3 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[3 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_TX(__desc, __idx) ((__desc)->payload.data[3 * (__idx)]) + +#define NE6X_CQ_TO_LEN_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_RX(__desc, __idx) ((__desc)->payload.data[5 * (__idx)]) +#define NE6X_CQ_TO_OFF_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 3] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 4] << 8)) + +#define PARA_KEY_STRING " " +#define ARRAY_P_MAX_COUNT 140 +#define HASH_KEY_SIZE 64 +#define HASH_DATA_SIZE 64 +#define TABLE_WIDHT_BIT_512 512 +#define TABLE_WIDHT_BIT_128 128 +#define TABLE_WIDHT_BIT_64 64 +#define TABLE_WIDHT_BIT_16 16 +#define TABLE_WIDHT_BIT_256 256 +#define TABLE_WIDHT_BIT_32 32 + +#define FRU_CHECK_6ASCII(x) (((x) >> 6) == 0x2) +#define ASCII628_BASE 32 +#define FRU_6BIT_8BITLENGTH(x) (((x) * 4) / 3) + +static int table_size[] = { + TABLE_WIDHT_BIT_512, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_16, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_256, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_32 +}; + +const struct ne6x_debug_info ne6x_device_info[] = { + {0xE220, "N5E025P2-PAUA", "25G"}, {0xE22C, "N5E025P2-NAUA", "25G"}, + {0xE221, "N5S025P2-PAUA", "25G"}, {0xE22D, "N5S025P2-NAUA", "25G"}, + {0xEA20, "N6E100P2-PAUA", "100G"}, {0xEA2C, "N6E100P2-NAUA", "100G"}, + {0xEA21, "N6S100P2-PAUA", "100G"}, {0xEA2D, "N6S100P2-NAUA", "100G"}, + {0xD221, "N6S025P2-PDUA", "25G"}, {0xDA21, "N6S100P2-PDUA", "100G"}, + {0x1220, "N5E025P2-PAGA", "25G"}, {0x122C, "N5E025P2-NAGA", "25G"}, + {0x1221, "N5S025P2-PAGA", "25G"}, {0x122D, "N5S025P2-NAGA", "25G"}, + {0x1A20, "N6E100P2-PAGA", "100G"}, {0x1A2C, "N6E100P2-NAGA", "100G"}, + {0x1A21, "N6S100P2-PAGA", "100G"}, {0x1A2D, "N6S100P2-NAGA", "100G"}, + {0x0221, "N6S100P2-NAGA", "100G"}, {0x0A21, "N6S100P2-PDGA", "100G"} }; + +char *my_strtok(char *p_in_string, char *p_in_delimit, char **pp_out_ret) +{ + static char *p_tmp; + char *p_strstr = NULL; + char *ret = NULL; + int for_index; + + if (!pp_out_ret) + return NULL; + + *pp_out_ret = NULL; + if (!p_in_delimit) + return p_in_string; + + if (p_in_string) + p_tmp = p_in_string; + + if (!p_tmp) + return NULL; + + ret = p_tmp; + p_strstr = strstr(p_tmp, p_in_delimit); + if (p_strstr) { + p_tmp = p_strstr + strlen(p_in_delimit); + for (for_index = 0; for_index < strlen(p_in_delimit); for_index++) + *(p_strstr + for_index) = '\0'; + } else { + p_tmp = NULL; + } + + *pp_out_ret = p_tmp; + + return ret; +} + +int my_isdigit(char in_char) +{ + if ((in_char >= '0') && (in_char <= '9')) + return 1; + else + return 0; +} + +int my_atoi(char *p_in_string) +{ + int flag = 1; + int ret = 0; + + while (my_isdigit(p_in_string[0]) == 0) + p_in_string++; + + if (*(p_in_string - 1) == '-') + flag = -1; + + while (my_isdigit(p_in_string[0]) != 0) { + ret *= 10; + ret += p_in_string[0] - '0'; + if (ret > INT_MAX || ret < INT_MIN) + return 0; + + p_in_string++; + } + + if (ret != 0) + return (flag * ret); + else + return 0; +} + +static struct dentry *ne6x_dbg_root; +u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, u8 *len); + +void 
ne6x_dbg_show_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "----RX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_alloc, + ring->next_to_use, ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "----TX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "----CQ: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_show_ring(struct ne6x_pf *pf, char *cmd_buf, int 
count) +{ + int i, j, k, l; + union ne6x_rx_desc *rx_desc; + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, + rx_desc->wb.vp, rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_mop_addr) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, + tx_desc->chain, tx_desc->transmit_type, tx_desc->sop_valid, + tx_desc->eop_valid, tx_desc->tso, tx_desc->l3_csum, + tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, + tx_desc->buffer_sop_addr); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + for (k = 0; k < ring->count; k++) { + cq_desc = NE6X_CQ_DESC(ring, k); + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, + "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n", k, + ring->reg_idx, cq_desc->ctype, cq_desc->num); + for (l = 0; l < cq_desc->num; l++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, + "******[TX] %d:%d val:0x%x\n", l, + NE6X_CQ_TO_OFF_TX(cq_desc, l), + NE6X_CQ_TO_STS_TX(cq_desc, l)); + else + dev_info(&pf->pdev->dev, + "******[RX] %d:%d val:0x%x len:0x%x\n", l, + NE6X_CQ_TO_OFF_RX(cq_desc, l), + NE6X_CQ_TO_STS_RX(cq_desc, l), + NE6X_CQ_TO_LEN_RX(cq_desc, l)); + } + } + } + } +} + +void ne6x_dbg_show_txtail(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int i, j; + struct ne6x_adapter *adpt; + struct ne6x_ring *ring; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %llx packets\n", i, j, + readq(ring->tail + j)); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +void ne6x_dbg_show_txq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link 
down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +void ne6x_dbg_show_rxq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] RX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_show_cq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] CQ queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_clean_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + rx_ring = adpt->rx_rings[j]; + cq_ring = adpt->cq_rings[j]; + + memset(&tx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&tx_ring->tx_stats, 0, sizeof(struct ne6x_txq_stats)); + + memset(&rx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&rx_ring->rx_stats, 0, sizeof(struct ne6x_rxq_stats)); + + memset(&cq_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&cq_ring->cq_stats, 0, sizeof(struct ne6x_cq_stats)); + } + dev_info(&pf->pdev->dev, "---------------------------adpt[%d] all ring cleaned---------------------------------------", + i); + } +} + +void ne6x_dbg_show_txring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------tx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + 
dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d, busy:%lld\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(tx_ring), + tx_ring->next_to_use, tx_ring->next_to_clean, + tx_ring->tx_stats.tx_busy); + } + } + dev_info(&pf->pdev->dev, "+----------------------------tx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_rxring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *rx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------rx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + rx_ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], alloc:%04d, use:%04d, clean:%04d, cq_expect:%04d\n", + i, j, head, tail, oft, rx_ring->next_to_alloc, + rx_ring->next_to_use, rx_ring->next_to_clean, + rx_ring->cq_last_expect); + } + } + dev_info(&pf->pdev->dev, "+----------------------------rx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_cqring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int queue_num = 0; + u64 head, tail; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------cq begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + cq_ring = adpt->cq_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, 
+ NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(cq_ring), cq_ring->next_to_use, + cq_ring->next_to_clean); + } + } + dev_info(&pf->pdev->dev, "+----------------------------cq end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_txdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + + tx_ring = adpt->tx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-----------------------------------Netdev[%d] - Queue[%d] - tx_desc begin-----------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < tx_ring->count; i++) { + tx_desc = NE6X_TX_DESC(tx_ring, i); + if (!tx_desc->buffer_mop_addr && i != 0) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "tx_desc[%d]\n", i); + dev_info(&pf->pdev->dev, "struct ne6x_tx_desc\n" + "{\n" + " u8 flags : 8; [0x%x]\n" + " u8 vp : 7; [%d]\n" + " u8 event_trigger : 1; [%d]\n" + " u8 chain : 1; [%d]\n" + " u8 transmit_type : 2; [%d]\n" + " u8 sop_valid : 1; [%d]\n" + " u8 eop_valid : 1; [%d]\n" + " u8 tso : 1; [%d]\n" + " u8 l3_csum : 1; [%d]\n" + " u8 l3_ofst : 7; [%d]\n" + " u8 l4_csum : 1; [%d]\n" + " u8 l4_ofst : 7; [%d]\n" + " u8 pld_ofst; [%d]\n" + " __le64 mop_cnt : 24; [%d]\n" + " __le64 sop_cnt : 16; [%d]\n" + " __le64 mss : 16; [%d]\n" + " __le64 buffer_mop_addr; [%lld]\n" + " __le64 buffer_sop_addr; [%lld]\n" + "};\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, tx_desc->tso, + tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, tx_desc->mss, + tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + } + dev_info(&pf->pdev->dev, "+------------------------------------------------Netdev[%d] - Queue[%d] - tx_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_rxdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + union ne6x_rx_desc *rx_desc = NULL; + struct ne6x_ring *rx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + rx_ring = 
adpt->rx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%2d] - rx_desc begin-------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < rx_ring->count; i++) { + rx_desc = NE6X_RX_DESC(rx_ring, i); + + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], p[0x%02x%02x%02x%02x%02x%02x%02x%02x], pkt_len[%d]\n", + adpt_num, queue_num, i, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, rx_desc->wb.vp, + rx_desc->wb.pd[0], rx_desc->wb.pd[1], rx_desc->wb.pd[2], rx_desc->wb.pd[3], + rx_desc->wb.pd[4], rx_desc->wb.pd[5], rx_desc->wb.pd[6], rx_desc->wb.pd[7], + rx_desc->wb.pkt_len); + } + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%d] - rx_desc end----------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_cqdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_ring *cq_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i, j; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + cq_ring = adpt->cq_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc begin------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < cq_ring->count; i++) { + cq_desc = NE6X_CQ_DESC(cq_ring, i); + + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], cq_desc[%d], vp[%d], ctype[%s], num[%d]\n", + adpt_num, queue_num, i, cq_ring->reg_idx, + cq_desc->ctype == 0 ? 
"tx" : "rx", + cq_desc->num); + for (j = 0; j < cq_desc->num; j++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, "******TX%d[%d]: val:0x%x\n", j, + NE6X_CQ_TO_OFF_TX(cq_desc, j), + NE6X_CQ_TO_STS_TX(cq_desc, j)); + else + dev_info(&pf->pdev->dev, "******RX%d[%d]: val:0x%x len:%d\n", j, + NE6X_CQ_TO_OFF_RX(cq_desc, j), + NE6X_CQ_TO_STS_RX(cq_desc, j), + NE6X_CQ_TO_LEN_RX(cq_desc, j)); + } + } + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +#ifdef CONFIG_RFS_ACCEL +void ne6x_dbg_show_arfs_cnt(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 idx = 0; + struct ne6x_adapter *pf_adpt; + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = NULL; + + ne6x_for_each_pf(pf, idx) { + pf_adpt = pf->adpt[idx]; + fltr_cntrs = pf_adpt->arfs_fltr_cntrs; + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + dev_info(&pf->pdev->dev, "pf_num:%d totle_num:%d\n\t\t\t tcp_v4_num:%d\n\t\t\t udp_v4_num:%d\n\t\t\t tcp_v6_num:%d\n\t\t\t udp_v6_num:%d\n", + idx, (atomic_read(&fltr_cntrs->active_tcpv4_cnt) + + atomic_read(&fltr_cntrs->active_udpv4_cnt) + + atomic_read(&fltr_cntrs->active_tcpv6_cnt) + + atomic_read(&fltr_cntrs->active_udpv6_cnt)), + atomic_read(&fltr_cntrs->active_tcpv4_cnt), + atomic_read(&fltr_cntrs->active_udpv4_cnt), + atomic_read(&fltr_cntrs->active_tcpv6_cnt), + atomic_read(&fltr_cntrs->active_udpv6_cnt)); + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + } +} +#endif + +extern u32 ne6x_dev_crc32(const u8 *buf, u32 size); + +void ne6x_dbg_apb_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "apb_read \n"); + return; + } + + offset = addr; + value = ne6x_reg_apb_read(pf, offset); + dev_info(&pf->pdev->dev, "offset = 0x%08X 0x%08X\n", addr, value); +} + +void ne6x_dbg_apb_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "apb_write \n"); + return; + } + + offset = addr; + ne6x_reg_apb_write(pf, offset, value); + dev_info(&pf->pdev->dev, "apb_write: 0x%llx = 0x%x\n", offset, value); +} + +void ne6x_dbg_mem_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int index = 0, cnt; + u32 *reg_data; + u64 offset; + u32 addr; + u32 size; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &size); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "mem_read \n"); + return; + } + + reg_data = kzalloc((size + 4) * 4, GFP_KERNEL); + offset = addr; + for (index = 0x00; index < size; index++) + reg_data[index] = ne6x_reg_apb_read(pf, offset + index * 4); + + for (index = 0x00; index < size / 4; index++) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2], reg_data[4 * index + 3]); + + if ((size % 4) == 1) + dev_info(&pf->pdev->dev, "%lx: %08X\n", (unsigned int long)(offset + index * 16), + reg_data[4 * index]); + else if ((size % 4) == 2) + dev_info(&pf->pdev->dev, "%lx: %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1]); + else if ((size % 4) == 3) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X\n", + (unsigned int long)(offset + 
index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2]); + + kfree((void *)reg_data); +} + +void ne6x_dbg_mem_write(struct ne6x_pf *pf, char *cmd_buf, int count) {} + +void ne6x_dbg_templ_help(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + dev_info(&pf->pdev->dev, "HW_FEATURES = 0\n"); + dev_info(&pf->pdev->dev, "HW_FLAGS = 1\n"); + dev_info(&pf->pdev->dev, "RSS_TABLE_SIZE = 2\n"); + dev_info(&pf->pdev->dev, "RSS_TABLE_ENTRY_WIDTH = 3\n"); + dev_info(&pf->pdev->dev, "RSS_HASH_KEY_BLOCK_SIZE = 4\n"); + dev_info(&pf->pdev->dev, "PORT2PI_0 = 5\n"); + dev_info(&pf->pdev->dev, "PI2PORT_0 = 25\n"); + dev_info(&pf->pdev->dev, "VLAN_TYPE = 33\n"); + dev_info(&pf->pdev->dev, "PI0_BROADCAST_LEAF = 37\n"); + dev_info(&pf->pdev->dev, "PORT_OLFLAGS_0 = 53\n"); + dev_info(&pf->pdev->dev, "PORT_2_COS_0 = 121\n"); + dev_info(&pf->pdev->dev, "VPORT0_LINK_STATUS = 155\n"); + dev_info(&pf->pdev->dev, "TSO_CKSUM_DISABLE = 156\n"); + dev_info(&pf->pdev->dev, "PORT0_MTU = 157\n"); + dev_info(&pf->pdev->dev, "PORT0_QINQ = 161\n"); + dev_info(&pf->pdev->dev, "CQ_SIZE = 229\n"); +} + +void ne6x_dbg_templ_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &vport, &type); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "temp_read \n"); + return; + } + + ne6x_reg_get_user_data(pf, vport + type, &value); + dev_info(&pf->pdev->dev, "temp_read 0x%04X value 0x%08X\n", type, value); +} + +void ne6x_dbg_templ_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i %i", &vport, &type, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "temp_write \n"); + return; + } + + ne6x_reg_set_user_data(pf, vport + type, value); + dev_info(&pf->pdev->dev, "temp_write: 0x%04x = 0x%x\n", type, value); +} + +void ne6x_dbg_soc_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "soc_read \n"); + return; + } + + ne6x_reg_indirect_read(pf, addr, &value); + dev_info(&pf->pdev->dev, "offset = 0x%08X 0x%08X\n", addr, value); +} + +void ne6x_dbg_soc_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "soc_write \n"); + return; + } + + ne6x_reg_indirect_write(pf, addr, value); + dev_info(&pf->pdev->dev, "soc_write: 0x%08X = 0x%08X\n", addr, value); +} + +void ne6x_dbg_tab_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[10] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 2) { + dev_warn(&pf->pdev->dev, "tab_read \n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if 
(!strncmp(p_str_array[1], "0x", 2)) + table_info->index = simple_strtoul(p_str_array[1], NULL, 16); + else + table_info->index = my_atoi(p_str_array[1]); + + table_info->size = table_size[table_info->table]; + ret = ne6x_reg_table_read(pf, table_info->table, table_info->index, + (u32 *)&table_info->data[0], table_info->size); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success" : "timeout!"); + + for (index = 0x00; index < (table_info->size >> 2) / 4; index++) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2], table_info->data[4 * index + 3]); + + if (((table_info->size >> 2) % 4) == 1) + dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]); + else if (((table_info->size >> 2) % 4) == 2) + dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1]); + else if (((table_info->size >> 2) % 4) == 3) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2]); + + kfree(table_info); +} + +void ne6x_dbg_set_mac_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 mac_addr[6]; + int port = 0; + int ret; + int cnt; + + if (strncmp(cmd_buf, "P0", 2) == 0) { + port = 0; + } else if (strncmp(cmd_buf, "P1", 2) == 0) { + port = 1; + } else { + dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n"); + dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:21:00:00:01\n"); + return; + } + + cnt = sscanf(&cmd_buf[2], "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX", &mac_addr[0], &mac_addr[1], + &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]); + if (cnt != 6) { + dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n"); + dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:24:00:00:01\n"); + return; + } + + if (port == 0) + memcpy(&psdk_spd_info->port_0_mac, &mac_addr, 6); + else if (port == 1) + memcpy(&psdk_spd_info->port_1_mac, &mac_addr, 6); + else if (port == 2) + memcpy(&psdk_spd_info->port_2_mac, &mac_addr, 6); + else if (port == 3) + memcpy(&psdk_spd_info->port_3_mac, &mac_addr, 6); + + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(*psdk_spd_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(*psdk_spd_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "set mac success!" 
: "set mac fail!"); +} + +void ne6x_dbg_get_mac(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 mac_addr[6]; + int port = 0; + + if (strncmp(cmd_buf, "P0", 2) == 0) { + port = 0; + } else if (strncmp(cmd_buf, "P1", 2) == 0) { + port = 1; + } else { + dev_warn(&pf->pdev->dev, "get_port_mac P0/P1\n"); + dev_warn(&pf->pdev->dev, "example-- get_port_mac P0\n"); + return; + } + + if (port == 0) + memcpy(&mac_addr, &psdk_spd_info->port_0_mac, 6); + else if (port == 1) + memcpy(&mac_addr, &psdk_spd_info->port_1_mac, 6); + else if (port == 2) + memcpy(&mac_addr, &psdk_spd_info->port_2_mac, 6); + else if (port == 3) + memcpy(&mac_addr, &psdk_spd_info->port_3_mac, 6); + else + return; + + dev_info(&pf->pdev->dev, "port %d: mac = %02x:%02x:%02x:%02x:%02x:%02x\n", port, + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); +} + +void ne6x_dbg_set_dev_type_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 *p_str_array[10] = {0}; + int array_index = 0, ret; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + u16 dev_type = 0; + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 1) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) { + dev_type = simple_strtoul(p_str_array[0], NULL, 16); + } else { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (dev_type != NE6000AI_2S_X16H_25G_N5 && dev_type != NE6000AI_2S_X16H_25G_N6) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + psdk_spd_info->product_mode = cpu_to_be16(dev_type); + psdk_spd_info->is_pcie_exist = 0x1; + + if (dev_type == NE6000AI_2S_X16H_25G_N5) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000800); + } else if (dev_type == NE6000AI_2S_X16H_25G_N6) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000100); + } else { + return; + } + + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "write eeprom mac success!" : "write eeprom mac fail!"); +} + +void ne6x_dbg_tab_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 8) { + dev_info(&pf->pdev->dev, "tab_write
...\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if (!strncmp(p_str_array[1], "0x", 2)) + table_info->index = simple_strtoul(p_str_array[1], NULL, 16); + else + table_info->index = my_atoi(p_str_array[1]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 2); index++) { + if (!strncmp(p_str_array[index + 2], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 2], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 2]); + + table_info->size++; + } + + table_info->size = table_size[table_info->table]; + + ret = ne6x_reg_table_write(pf, table_info->table, table_info->index, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +void ne6x_dbg_tab_insert(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0}; + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u32 table_id = 0xffffffff; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= ARRAY_P_MAX_COUNT) + break; + + if (!p_tmp_ret) + break; + } + + /* 1 + 16 + 1+++ */ + if (array_index < 24) { + dev_warn(&pf->pdev->dev, "tab_insert
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, NULL, + table_info->size); + if (ret == -ENOENT) { + table_info->size = 64 + table_size[table_info->table]; + ret = ne6x_reg_table_insert(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + &table_id); + } else { + dev_info(&pf->pdev->dev, "0x%x 0x%x 0x%x 0x%x table exist\n", table_info->data[0], + table_info->data[1], table_info->data[2], table_info->data[3]); + return; + } + if (ret == 0) + dev_info(&pf->pdev->dev, "insert rule_id = 0x%x\n", table_id); + + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : + ((ret != -ETIMEDOUT) ? "fail!" : "timeout!")); +} + +void ne6x_dbg_tab_delete(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_delete(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +void ne6x_dbg_tab_search(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + dev_info(&pf->pdev->dev, "array_index = %d\n", array_index); + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + (u32 *)&table_info->data[0], table_info->size); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "success!" : ((ret == -ENOENT) ? "not fount!" : "timeout!")); + if (ret) + return; + + for (index = 0x00; index < (table_info->size >> 2) / 4; index++) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2], table_info->data[4 * index + 3]); + + if (((table_info->size >> 2) % 4) == 1) + dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]); + else if (((table_info->size >> 2) % 4) == 2) + dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1]); + else if (((table_info->size >> 2) % 4) == 3) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2]); + + kfree(table_info); +} + +void ne6x_dbg_get_fru_info(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct file *fp = NULL; + u8 *buffer; + int para_count; + u32 size; + + para_count = sscanf(&cmd_buf[0], "%i", &size); + if (para_count != 1) { + dev_warn(&pf->pdev->dev, "fru_read \n"); + return; + } + + if (size > 512) { + dev_warn(&pf->pdev->dev, "size must less than 512\n."); + return; + } + + buffer = kzalloc((size + 4), GFP_KERNEL); + ne6x_dev_get_fru(pf, (u32 *)buffer, size); + + fp = filp_open("/opt/share/fru.bin", O_RDWR | O_CREAT, 0644); + if (!fp) { + dev_err(&pf->pdev->dev, "can't open /opt/share/fru.bin.\n"); + return; + } + + kernel_write(fp, (char *)buffer, size, &fp->f_pos); + filp_close(fp, NULL); +} + +u32 getparam(char *cmd_buf, u32 *param, int max_cnt) +{ + int ret, i, j, tmp, tmp1, tmp2, flag = 0; + u32 count = 0, cnt = 0, cnt_t = 0; + char *p = &cmd_buf[0]; + char *char_offset; + u32 *offset; + + offset = kzalloc((max_cnt + 1) * sizeof(u32), GFP_ATOMIC); + char_offset = kzalloc((max_cnt + 1) * sizeof(char), GFP_ATOMIC); + /* count the number */ + for (i = 0; i < strlen(cmd_buf); i++) { + if (cmd_buf[i] == ',' || cmd_buf[i] == '-') { + count++; + if (cmd_buf[i] == ',') { + offset[count] = i + 1; + char_offset[count] = ','; + } else if (cmd_buf[i] == '-') { + offset[count] = i + 1; + char_offset[count] = '-'; + } + } + if (cmd_buf[i] == ' ') + break; + + if (count >= max_cnt) + break; + } + + for (i = 0; i <= count; i++) { + ret = sscanf(p, "%i", ¶m[i + cnt_t]); + if (ret == 1) { + cnt++; + if (char_offset[cnt] == '-') { + flag++; + p = &cmd_buf[offset[cnt]]; + ret = sscanf(p, "%i", ¶m[i + cnt_t + 1]); + tmp1 = param[i + cnt_t]; + tmp2 = param[i + cnt_t + 1]; + if (ret == 1) { + tmp = i + cnt_t; + for (j = 0; j <= tmp2 - tmp1; j++) + param[tmp + j] = tmp1 + j; + } + cnt_t += tmp2 - tmp1; + + cnt++; + } + p = &cmd_buf[offset[cnt]]; + } + } + + kfree(offset); 
+ + return cnt + cnt_t - 2 * flag; +} + +void ne6x_dbg_show_pcie_drop_counter(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + union ne6x_eth_recv_cnt eth_recv_cnt; + u64 __iomem *reg; + + reg = (void __iomem *)pf->hw.hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ETH_RECV_CNT); + eth_recv_cnt.val = readq(reg); + dev_info(&pf->pdev->dev, "pcie drop cnt = %d\n", eth_recv_cnt.reg.csr_eth_pkt_drop_cnt + + eth_recv_cnt.reg.csr_eth_rdq_drop_cnt); +} + +void ne6x_dbg_clr_table(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 table_id = 0, cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &table_id); + if (table_id == 6) + ne6x_reg_clear_table(pf, table_id); +} + +void ne6x_dbg_set_hw_flag_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + int flag = 0; + int ret; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &flag); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "\n0:none;1,ram white list;2,ddr white list\n"); + return; + } + + psdk_spd_info->hw_flag = cpu_to_be32(flag); + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "set hw_flag success!" + : "set hw_flag fail!"); +} + +void ne6x_dbg_erase_norflash(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 offset; + u32 length; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "norflash_erase \n"); + return; + } + + if (!ne6x_reg_erase_norflash(pf, offset, length)) + return; + + dev_err(&pf->pdev->dev, "norflash_erase fail.\n"); +} + +void ne6x_dbg_write_norflash(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *ptemp_data = NULL; + u32 offset = 0; + u32 length = 0; + u32 temp_data = 0; + u8 *ptemp = NULL; + int i = 0; + + ptemp_data = kzalloc(1024, GFP_ATOMIC); + + while ((ptemp = strsep(&cmd_buf, " "))) { + if (!strncmp(ptemp, "0x", 2)) + temp_data = simple_strtoul(ptemp, NULL, 16); + else + temp_data = my_atoi(ptemp); + + if (i == 0) + offset = temp_data; + else if (i == 1) + length = temp_data; + else + ptemp_data[i - 2] = (u8)temp_data; + + i++; + if (i == 1026) + break; + } + + if (length > 1024 || i < 2) { + dev_warn(&pf->pdev->dev, "norflash_write (byte split by space max 256)\n"); + goto pdata_memfree; + } + + if (!ne6x_reg_write_norflash(pf, offset, length, (u32 *)ptemp_data)) + dev_info(&pf->pdev->dev, "write norflash success.\n"); + else + dev_err(&pf->pdev->dev, "write norflash fail.\n"); + +pdata_memfree: + kfree(ptemp_data); +} + +void ne6x_dbg_read_norflash(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 offset = 0; + u32 length = 0; + u32 buffer_len; + char *pdata = NULL; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "norflash_read \n"); + return; + } + + buffer_len = length; + if (length % 4) + buffer_len = (length / 4 + 1) * 4; + + pdata = kzalloc(buffer_len, GFP_ATOMIC); + if (!ne6x_reg_read_norflash(pf, offset, buffer_len, (u32 *)pdata)) + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pdata, length); + else + dev_err(&pf->pdev->dev, "read_norflash fail.\n"); + + kfree(pdata); +} + +void ne6x_dbg_meter_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0}; + u32 cir, type_num, type_flag = 0; + u32 cir_maxnum = 0xfffff; + u32 
cbs_maxnum = 0xffffff; + struct meter_table vf_bw; + char *p_tmp_ret; + int index, ret = 0; + int array_index = 0; + u8 *p_in_string = NULL; + u32 data[3] = {0}; + u32 type = 0; + + p_in_string = &cmd_buf[0]; + p_tmp_ret = NULL; + + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= ARRAY_P_MAX_COUNT) + break; + if (!p_tmp_ret) + break; + } + if (array_index != 3) { + dev_warn(&pf->pdev->dev, "Incorrect input, please re-enter\n"); + return; + } + + for (index = 0; index < array_index; index++) { + if (!strncmp(p_str_array[index], "0x", 2)) + data[index] = simple_strtoul(p_str_array[index], NULL, 16); + else + data[index] = my_atoi(p_str_array[index]); + } + + type_num = data[0]; + switch (type_num) { + case 0: + type_flag |= NE6X_F_ACK_FLOOD; + break; + case 1: + type_flag |= NE6X_F_PUSH_ACK_FLOOD; + break; + case 2: + type_flag |= NE6X_F_SYN_ACK_FLOOD; + break; + case 3: + type_flag |= NE6X_F_FIN_FLOOD; + break; + case 4: + type_flag |= NE6X_F_RST_FLOOD; + break; + case 5: + type_flag |= NE6X_F_PUSH_SYN_ACK_FLOOD; + break; + case 6: + type_flag |= NE6X_F_UDP_FLOOD; + break; + case 7: + type_flag |= NE6X_F_ICMP_FLOOD; + break; + case 8: + type_flag |= NE6X_F_FRAGMENT_FLOOD; + break; + default: + dev_err(&pf->pdev->dev, "err_input,please enter one of'0-8'\n"); + return; + } + + if (data[1] == 1) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type); + type |= type_flag; + ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type); + } else if (data[1] == 0) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type); + type &= ~type_flag; + ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type); + } else { + dev_err(&pf->pdev->dev, "Input error, please enter '0' or '1'\n"); + return; + } + + cir = data[2] * 1000 + 1023; + cir = min((cir / 1024), cir_maxnum); + vf_bw.cir = cir; + vf_bw.pir = min((cir + cir / 10), cir_maxnum); + + vf_bw.cbs = min((vf_bw.cir * 10000), cbs_maxnum); + vf_bw.pbs = min((vf_bw.pir * 10000), cbs_maxnum); + ret = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | type_num, + (u32 *)&vf_bw, sizeof(vf_bw)); + + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "write meter success!" 
: "write meter fail!"); +} + +const struct ne6x_dbg_cmd_wr deg_cmd_wr[] = { + {"queue", ne6x_dbg_show_queue}, + {"ring", ne6x_dbg_show_ring}, + {"txq", ne6x_dbg_show_txq}, + {"rxq", ne6x_dbg_show_rxq}, + {"cq", ne6x_dbg_show_cq}, + {"clean", ne6x_dbg_clean_queue}, + {"txtail", ne6x_dbg_show_txtail}, + {"txr", ne6x_dbg_show_txring}, + {"rxr", ne6x_dbg_show_rxring}, + {"cqr", ne6x_dbg_show_cqring}, +#ifdef CONFIG_RFS_ACCEL + {"arfs", ne6x_dbg_show_arfs_cnt}, +#endif + {"apb_read", ne6x_dbg_apb_read}, + {"apb_write", ne6x_dbg_apb_write}, + {"mem_read", ne6x_dbg_mem_read}, + {"mem_write", ne6x_dbg_mem_write}, + {"soc_read", ne6x_dbg_soc_read}, + {"soc_write", ne6x_dbg_soc_write}, + {"templ_help", ne6x_dbg_templ_help}, + {"templ_read", ne6x_dbg_templ_read}, + {"templ_write", ne6x_dbg_templ_write}, + {"tab_read", ne6x_dbg_tab_read}, + {"tab_write", ne6x_dbg_tab_write}, + {"tab_insert", ne6x_dbg_tab_insert}, + {"tab_delete", ne6x_dbg_tab_delete}, + {"tab_search", ne6x_dbg_tab_search}, + {"set_port_mac", ne6x_dbg_set_mac_to_eeprom}, + {"get_port_mac", ne6x_dbg_get_mac}, + {"fru_read", ne6x_dbg_get_fru_info}, + {"pcie_dropcnt", ne6x_dbg_show_pcie_drop_counter}, + {"clear_table", ne6x_dbg_clr_table}, + {"set_hw_flag", ne6x_dbg_set_hw_flag_eeprom}, + {"norflash_erase", ne6x_dbg_erase_norflash}, + {"norflash_write", ne6x_dbg_write_norflash}, + {"norflash_read", ne6x_dbg_read_norflash}, + {"meter_write", ne6x_dbg_meter_write}, +}; + +/** + * ne6x_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_command_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + return 0; +} + +static ssize_t ne6x_dbg_info_pnsn_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + u8 *pru_name = NULL, *pru_pn = NULL, *pru_sn = NULL; + char name_pre[INFO_COL] = {0}; + char name_aft[INFO_COL] = {0}; + struct ne6x_pf *pf = NULL; + u32 buf_size = 500; + char *name = NULL; + ssize_t len = 0; + u8 *buffer_data; + u8 length = 0; + u16 device_id; + int erro = 0; + int dex = 0; + int i = 0; + + if (*ppos > 0 || count < PAGE_SIZE) + return 0; + + name = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!name) + return -ENOMEM; + + buffer_data = kzalloc(buf_size, GFP_KERNEL); + if (!buffer_data) { + kfree(name); + return -ENOMEM; + } + + pf = filp->private_data; + ne6x_dev_get_fru(pf, (u32 *)buffer_data, buf_size); + + pru_name = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_NAME, &length); + if (!pru_name) { + dev_err(&pf->pdev->dev, "get pru_name info erro"); + device_id = pf->hw.subsystem_device_id; + if (!device_id) { + dev_err(&pf->pdev->dev, "subsystem_device_id is NULL!"); + erro = 1; + goto get_buffer_end; + } + + sprintf(name_pre, "Product Name: BeiZhongWangXin"); + sprintf(name_aft, "Ethernet Adapter"); + + for (i = 0; i < ARRAY_SIZE(ne6x_device_info); i++) { + if (device_id == ne6x_device_info[i].system_id) + dex = i; + } + + if (dex != -1) { + len = sprintf(name, "%s %s %s %s\n", name_pre, + ne6x_device_info[dex].system_name, + ne6x_device_info[dex].system_speed, name_aft); + } else { + dev_warn(&pf->pdev->dev, "subsystem_device_id not match"); + erro = 1; + goto get_buffer_end; + } + + } else { + len = sprintf(name, "Product Name: %s\n", pru_name); + } + + pru_pn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_PART_NUMBER, &length); + if (pru_pn) + len = sprintf(name, "%s[PN] Part number: %s\n", 
name, pru_pn); + + pru_sn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_SERIAL_NUMBER, &length); + if (pru_sn) + len = sprintf(name, "%s[SN] Serial number: %s\n", name, pru_sn); + + if (copy_to_user(buffer, name, len)) { + erro = 2; + goto get_buffer_end; + } + + if (!len) { + erro = 1; + goto get_buffer_end; + } + + *ppos = len; + goto get_buffer_end; + +get_buffer_end: + kfree(pru_pn); + kfree(pru_sn); + kfree(pru_name); + kfree(name); + kfree(buffer_data); + + if (erro == 1) + return 0; + else if (erro == 2) + return -EFAULT; + + return len; +} + +static bool ne6x_dbg_fru_checksum(const u8 *data, u32 len) +{ + u8 gl = 0; + u32 i; + + for (i = 0; i < len - 1; i++) + gl += data[i]; + + gl = ~gl + 1; + return gl == data[len - 1]; +} + +static int ne6x_dbg_fru_get_offset(u8 *buffer, enum fru_type type, u8 *offset) +{ + u8 hd[8] = {0}; + int i; + + for (i = 0; i < 8; i++) + hd[i] = buffer[i]; + + if (!(hd[0] & 0x1)) + return -2; + + if (!ne6x_dbg_fru_checksum(hd, 8)) + return -3; + + if (type < INTER_USE_AREA || type > MUILT_AREA) + return -4; + + *offset = hd[type + 1]; + + return 0; +} + +static u8 *ne6x_dbg_fru_6ascii28(const u8 *data, u8 *len) +{ + u8 len_bit_6, len_bit_8; + int i, i6, byte; + u8 *buf = NULL; + + len_bit_6 = data[0] & 0x3F; + len_bit_8 = FRU_6BIT_8BITLENGTH(len_bit_6); + buf = kzalloc(len_bit_8 + 1, GFP_ATOMIC); + + if (!buf) { + *len = 0; + return NULL; + } + + for (i = 0, i6 = 1; i6 <= len_bit_6 && i < len_bit_8 && data[i6]; i++) { + byte = (i - 1) % 4; + + switch (byte) { + case 0: + buf[i] = data[i6] & 0x3F; + break; + case 1: + buf[i] = (data[i6] >> 6) | (data[1 + i6] << 2); + i6++; + break; + case 2: + buf[i] = (data[i6] >> 4) | (data[1 + i6] << 4); + i6++; + break; + case 3: + buf[i] = data[i6++] >> 2; + break; + } + + buf[i] &= 0x3F; + buf[i] += ASCII628_BASE; + } + + *len = len_bit_8; + + return buf; +} + +u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, u8 *len) +{ + u8 hd[2] = {0}; + u8 *pt = NULL; + u8 ofst = 0; + u32 i = 0; + + if (!buffer) + return NULL; + + if (ne6x_dbg_fru_get_offset(buffer, PRODUCT_AREA, &ofst) != 0 || ofst == 0) { + *len = 0; + return NULL; + } + + ofst *= 8; + hd[0] = buffer[ofst]; + hd[1] = buffer[ofst + 1]; + if (!(hd[0] & 0x1) || hd[1] == 0) + return NULL; + + if (!ne6x_dbg_fru_checksum(&buffer[ofst], hd[1] * 8)) + return NULL; + + ofst += 3; + + for (i = 0; i < part; i++) + ofst += 1 + (buffer[ofst] & 0x3f); + + if (FRU_CHECK_6ASCII(buffer[ofst])) { + pt = ne6x_dbg_fru_6ascii28(&buffer[ofst], len); + } else { + *len = (buffer[ofst] & 0x3f); + pt = kzalloc(*len, GFP_ATOMIC); + if (!pt) + return NULL; + + memcpy(pt, &buffer[ofst + 1], *len); + } + + return pt; +} + +void ne6x_dbg_update_adpt_speed(struct ne6x_adapter *adpt, u32 speed, u32 lane_mode) {} + +/** + * ne6x_dbg_command_write - write into command datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_command_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ne6x_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + struct ne6x_ring *tx_ring; + int bytes_not_copied; + struct ne6x_adapter *adpt; + int i, cnt = 0; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NE6X_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + 
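/*
 * Minimal sketch of the first-token dispatch used by the debugfs
 * "command" write path around this point: the handler terminates the
 * copied buffer, strips the trailing newline, and matches the first
 * space-delimited word against a {name, handler} table, handing the
 * rest of the line to the matching handler.  The names cmd_entry,
 * dispatch_cmd and the two stub handlers are illustrative only.
 */
#include <stdio.h>
#include <string.h>

struct cmd_entry {
	const char *name;
	void (*handler)(const char *args);
};

static void do_fru_read(const char *args) { printf("fru_read args: '%s'\n", args); }
static void do_tab_read(const char *args) { printf("tab_read args: '%s'\n", args); }

static const struct cmd_entry cmds[] = {
	{ "fru_read", do_fru_read },
	{ "tab_read", do_tab_read },
};

static int dispatch_cmd(char *line)
{
	char *nl = strchr(line, '\n');
	char *sp, *args = "";
	size_t i;

	if (nl)
		*nl = '\0';		/* strip trailing newline, as the driver does */

	sp = strchr(line, ' ');
	if (sp) {
		*sp = '\0';
		args = sp + 1;		/* remainder of the line goes to the handler */
	}

	for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++) {
		if (strcmp(line, cmds[i].name) == 0) {
			cmds[i].handler(args);
			return 0;
		}
	}

	return -1;			/* unknown command */
}

int main(void)
{
	char buf[] = "fru_read 256\n";

	return dispatch_cmd(buf);
}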
bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "updtail", 7) == 0) { + int idx, vp, tail; + + cnt = sscanf(&cmd_buf[7], "%d %d %d", &idx, &vp, &tail); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "updtail \n"); + goto command_write_done; + } + adpt = pf->adpt[idx ? 1 : 0]; + tx_ring = adpt->tx_rings[vp & 0xf]; + ne6x_tail_update(tx_ring, tail); + dev_info(&pf->pdev->dev, "write: adpt = %d vp = 0x%x tail_ptr = %d\n", idx ? 1 : 0, + vp, tail); + } else if (strncmp(cmd_buf, "memrd", 5) == 0) { + u32 base_addr; + u32 offset_addr = 0; + u64 value; + int index, vp; + + cnt = sscanf(&cmd_buf[5], "%d", &vp); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "memrd \n"); + goto command_write_done; + } + + offset_addr = 0x0; + for (index = 0; index < 0x20; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + if (base_addr == 0x13F) { + offset_addr = 0x21; + for (index = 0x21; index < 0x24; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0x39; + for (index = 0x39; index < 0x4E; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0x80; + for (index = 0x80; index < 0x95; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0xA3; + for (index = 0xA3; index < 0xA5; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + } + } else if (strncmp(cmd_buf, "read", 4) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value; + + cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value; + + cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + + ne6x_reg_pci_write(pf, base_addr, offset_addr, value); + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "wr", 2) == 0) { + u32 offset; + u32 value; + + cnt = sscanf(&cmd_buf[2], "%i %i", &offset, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "rr \n"); + goto command_write_done; + } + ne6x_reg_indirect_write(pf, offset, value); + dev_info(&pf->pdev->dev, "wr: 0x%x = 0x%x\n", offset, value); + } else if (strncmp(cmd_buf, "rr", 
2) == 0) { + u32 offset; + u32 value; + + cnt = sscanf(&cmd_buf[2], "%i", &offset); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + value = ne6x_reg_indirect_read(pf, offset, &value); + dev_info(&pf->pdev->dev, "rr: 0x%x = 0x%x\n", offset, value); + } else if (strncmp(cmd_buf, "txd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "txd \n"); + goto command_write_done; + } + + ne6x_dbg_show_txdesc_states(adpt_num, quenue_num, pf); + } else if (strncmp(cmd_buf, "rxd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "rxd \n"); + goto command_write_done; + } + + ne6x_dbg_show_rxdesc_states(adpt_num, quenue_num, pf); + } else if (strncmp(cmd_buf, "cqd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "cqd \n"); + goto command_write_done; + } + + ne6x_dbg_show_cqdesc_states(adpt_num, quenue_num, pf); + } else { + for (i = 0; i < count; i++) { + if (cmd_buf[i] == ' ') { + cmd_buf[i] = '\0'; + cnt = i; + break; + } + if (cmd_buf[i] == '\0') { + cnt = i; + break; + } + } + + for (i = 0; i < ARRAY_SIZE(deg_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_cmd_wr[i].command, cnt) == 0) { + deg_cmd_wr[i].command_proc(pf, &cmd_buf[cnt + 1], count - cnt - 1); + goto command_write_done; + } + } + + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6x_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_command_read, + .write = ne6x_dbg_command_write, +}; + +const struct ne6x_dbg_cmd_wr deg_netdev_ops_cmd_wr[] = {}; + +/** + * ne6x_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static const struct file_operations ne6x_dbg_info_pnsn_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_info_pnsn_read, +}; + +static const struct file_operations ne6x_dbg_info_tps_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_proc_tps_read, +}; + +static ssize_t ne6x_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return 0; +} + +/** + * ne6x_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ne6x_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + int i; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NE6X_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = 
cmd_buf_tmp - cmd_buf + 1; + } + + for (i = 0; i < ARRAY_SIZE(deg_netdev_ops_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_netdev_ops_cmd_wr[i].command, count) == 0) { + deg_netdev_ops_cmd_wr[i].command_proc(pf, + &cmd_buf[sizeof(deg_netdev_ops_cmd_wr[i].command) + 1], + count - 1 - sizeof(deg_netdev_ops_cmd_wr[i].command)); + goto command_write_done; + } + } + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6x_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_netdev_ops_read, + .write = ne6x_dbg_netdev_ops_write, +}; + +/** + * ne6x_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void ne6x_dbg_pf_init(struct ne6x_pf *pf) +{ + const struct device *dev = &pf->pdev->dev; + const char *name = pci_name(pf->pdev); + struct dentry *pfile; + + pf->ne6x_dbg_pf = debugfs_create_dir(name, ne6x_dbg_root); + if (!pf->ne6x_dbg_pf) + return; + + pf->ne6x_dbg_info_pf = debugfs_create_dir("info", pf->ne6x_dbg_pf); + if (!pf->ne6x_dbg_info_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->ne6x_dbg_pf, pf, &ne6x_dbg_command_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("netdev_ops", 0600, pf->ne6x_dbg_pf, pf, + &ne6x_dbg_netdev_ops_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("product_info", 0600, pf->ne6x_dbg_info_pf, pf, + &ne6x_dbg_info_pnsn_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("power_info", 0600, pf->ne6x_dbg_info_pf, pf, + &ne6x_dbg_info_tps_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_err(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->ne6x_dbg_info_pf); + debugfs_remove_recursive(pf->ne6x_dbg_pf); +} + +/** + * ne6x_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void ne6x_dbg_pf_exit(struct ne6x_pf *pf) +{ + debugfs_remove_recursive(pf->ne6x_dbg_info_pf); + pf->ne6x_dbg_info_pf = NULL; + + debugfs_remove_recursive(pf->ne6x_dbg_pf); + pf->ne6x_dbg_pf = NULL; +} + +/** + * ne6x_dbg_init - start up debugfs for the driver + **/ +void ne6x_dbg_init(void) +{ + ne6x_dbg_root = debugfs_create_dir(ne6x_driver_name, NULL); + if (!ne6x_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * ne6x_dbg_exit - clean out the driver's debugfs entries + **/ +void ne6x_dbg_exit(void) +{ + debugfs_remove_recursive(ne6x_dbg_root); + ne6x_dbg_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h new file mode 100644 index 000000000000..2094e52f4b6d --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_DEBUGFS_H +#define _NE6X_DEBUGFS_H + +struct ne6x_debug_table { + int table; + int index; + int size; + u32 data[128]; +}; + +#ifdef CONFIG_DEBUG_FS + +enum fru_product_part { + MANUFACTURER_NAME = 0, + PRODUCT_NAME, + PRODUCT_PART_NUMBER, /* pn */ + PRODUCT_VERSION, + PRODUCT_SERIAL_NUMBER, /* sn */ + PRODUCT_ASSET_TAG, + PRODUCT_FRU_FILE_ID, +}; + +enum fru_type { + INTER_USE_AREA = 0, + CHASSIS_AREA, + BOARD_AREA, + PRODUCT_AREA, + MUILT_AREA, +}; + +#define NE6X_DEBUG_CHAR_LEN 1024 + +#define INFO_ROW 20 +#define INFO_COL 50 + +extern char ne6x_driver_name[]; + +struct ne6x_dbg_cmd_wr { + char command[NE6X_DEBUG_CHAR_LEN]; + void (*command_proc)(struct ne6x_pf *pf, char *cmd_buf, int count); +}; + +struct ne6x_debug_info { + u16 system_id; + char system_name[INFO_COL]; + char system_speed[INFO_COL]; +}; + +void ne6x_dbg_init(void); +void ne6x_dbg_exit(void); + +void ne6x_dbg_pf_init(struct ne6x_pf *pf); +void ne6x_dbg_pf_exit(struct ne6x_pf *pf); +#else /* !CONFIG_DEBUG_FS */ + +static inline void ne6x_dbg_init(void) +{ } +static inline void ne6x_dbg_exit(void) +{ } +static inline void ne6x_dbg_pf_init(struct ne6x_pf *pf) +{ } +static inline void ne6x_dbg_pf_exit(struct ne6x_pf *pf) +{ } +#endif /* end CONFIG_DEBUG_FS */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c new file mode 100644 index 000000000000..70381bd6ebc9 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c @@ -0,0 +1,1602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "reg.h" + +#define NE6X_SDK_CRC32_DATA_LEN 256 + +#define NE6X_PPORT_BY_HWINFO(HWINFO, index) (((HWINFO) >> (8 * (index))) & 0xff) + +#define to_be32_vector(s, e, p) \ +({ \ + int __n; \ + u32 *__data = (u32 *)(p);\ + for (__n = (s); __n < (e); __n++) \ + __data[__n] = cpu_to_be32(__data[__n]); \ +}) + +void ext_toeplitz_key(const unsigned char *key, unsigned char *ext_key) +{ + int i; + + for (i = 0; i < 39; i++) { + ext_key[i] = key[i]; + ext_key[44 + i] = (key[i] << 1) | (key[i + 1] >> 7); + ext_key[44 * 2 + i] = (key[i] << 2) | (key[i + 1] >> 6); + ext_key[44 * 3 + i] = (key[i] << 3) | (key[i + 1] >> 5); + ext_key[44 * 4 + i] = (key[i] << 4) | (key[i + 1] >> 4); + ext_key[44 * 5 + i] = (key[i] << 5) | (key[i + 1] >> 3); + ext_key[44 * 6 + i] = (key[i] << 6) | (key[i + 1] >> 2); + ext_key[44 * 7 + i] = (key[i] << 7) | (key[i + 1] >> 1); + } + + ext_key[39] = key[39]; + ext_key[44 + 39] = (key[39] << 1) | (key[1] >> 7); + ext_key[44 * 2 + 39] = (key[39] << 2) | (key[1] >> 6); + ext_key[44 * 3 + 39] = (key[39] << 3) | (key[1] >> 5); + ext_key[44 * 4 + 39] = (key[39] << 4) | (key[1] >> 4); + ext_key[44 * 5 + 39] = (key[39] << 5) | (key[1] >> 3); + ext_key[44 * 6 + 39] = (key[39] << 6) | (key[1] >> 2); + ext_key[44 * 7 + 39] = (key[39] << 7) | (key[1] >> 1); + + for (i = 0; i < 4; i++) { + ext_key[40 + i] = ext_key[i]; + ext_key[44 + 40 + i] = ext_key[44 + i]; + ext_key[44 * 2 + 40 + i] = ext_key[44 * 2 + i]; + ext_key[44 * 3 + 40 + i] = ext_key[44 * 3 + i]; + ext_key[44 * 4 + 40 + i] = ext_key[44 * 4 + i]; + ext_key[44 * 5 + 40 + i] = ext_key[44 * 5 + i]; + ext_key[44 * 6 + 40 + i] = ext_key[44 * 6 + i]; + ext_key[44 * 7 + 40 + i] = ext_key[44 * 7 + i]; + } +} + +static u32 ne6x_dev_bitrev(u32 input, int bw) +{ + u32 var = 0; + int i; + + for (i = 0; i < bw; i++) { + if (input & 
0x01) + var |= 1 << (bw - 1 - i); + + input >>= 1; + } + + return var; +} + +void ne6x_dev_crc32_init(u32 poly, u32 *table) +{ + u32 c; + int i, j; + + poly = ne6x_dev_bitrev(poly, 32); + + for (i = 0; i < NE6X_SDK_CRC32_DATA_LEN; i++) { + c = i; + for (j = 0; j < 8; j++) { + if (c & 1) + c = poly ^ (c >> 1); + else + c = c >> 1; + } + table[i] = c; + } +} + +u32 ne6x_dev_crc32(const u8 *buf, u32 size) +{ + u32 ne6x_sdk_crc32tab[NE6X_SDK_CRC32_DATA_LEN]; + u32 i, crc; + + ne6x_dev_crc32_init(0x4C11DB7, ne6x_sdk_crc32tab); + crc = 0xFFFFFFFF; + + for (i = 0; i < size; i++) + crc = ne6x_sdk_crc32tab[(crc ^ buf[i]) & 0xff] ^ (crc >> 8); + + return crc ^ 0xFFFFFFFF; +} + +int ne6x_dev_spd_verify(struct ne6x_dev_eeprom_info *spd_info) +{ + if (be32_to_cpu(spd_info->spd_verify_value) == + ne6x_dev_crc32((const u8 *)spd_info, sizeof(*spd_info) - 4)) + return 0; + + return -EINVAL; +} + +int ne6x_dev_get_eeprom(struct ne6x_pf *pf) +{ + int retry = 3; + + while (retry-- > 0) { + ne6x_reg_e2prom_read(pf, 0x0, (u8 *)&pf->sdk_spd_info, sizeof(pf->sdk_spd_info)); + if (!ne6x_dev_spd_verify(&pf->sdk_spd_info)) + return 0; + } + + memset(&pf->sdk_spd_info, 0, sizeof(pf->sdk_spd_info)); + + return -EINVAL; +} + +static int ne6x_dev_get_dev_info(struct ne6x_pf *pf) +{ + int ret; + + ret = ne6x_dev_get_eeprom(pf); + if (!ret) { + pf->dev_type = be16_to_cpu(pf->sdk_spd_info.product_mode); + pf->hw_flag = be32_to_cpu(pf->sdk_spd_info.hw_flag); + if (!pf->hw_flag) + pf->hw_flag = 1; + } else { + dev_err(ne6x_pf_to_dev(pf), "get eeprom fail\n"); + } + + return ret; +} + +int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + if (pf->hw_flag == 1 || pf->hw_flag == 2) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_WHITELIST_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + dev_info(ne6x_pf_to_dev(pf), "hw not support white list func\n"); + return -EOPNOTSUPP; + } + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_WHITELIST_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } + + return 0; +} + +void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +void ne6x_dev_set_trust_vlan(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_TRUST_VLAN_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_TRUST_VLAN_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf) +{ + u32 data; + + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + if (data & NE6X_F_TRUST_VLAN_ENABLED) + return true; + return false; +} + +int ne6x_dev_get_pport(struct ne6x_adapter *adpt) +{ + u32 lport_to_phy; + + if (!adpt) + return 0; + + switch (adpt->back->dev_type) { + case NE6000AI_2S_X16H_25G_N5: + return adpt->idx; + default: + break; + } + + lport_to_phy = adpt->back->sdk_spd_info.logic_port_to_phyical; + + return NE6X_PPORT_BY_HWINFO(be32_to_cpu(lport_to_phy), 
adpt->idx); +} + +static void ne6x_dev_set_roce_icrc_offload(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_S_ROCE_ICRC_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_S_ROCE_ICRC_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +int ne6x_dev_init(struct ne6x_pf *pf) +{ + if (unlikely(ne6x_dev_get_dev_info(pf))) + return -EINVAL; + + ne6x_reg_get_ver(pf, &pf->verinfo); + ne6x_dev_clear_vport(pf); + ne6x_dev_set_fast_mode(pf, false, 0); + ne6x_dev_set_roce_icrc_offload(pf, true); + + return 0; +} + +int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac) +{ + struct ne6x_dev_eeprom_info *info = &adpt->back->sdk_spd_info; + + memset(mac, 0, 6); + switch (adpt->idx) { + case 0: + ether_addr_copy(mac, &info->port_0_mac[0]); + break; + case 1: + ether_addr_copy(mac, &info->port_1_mac[0]); + break; + case 2: + ether_addr_copy(mac, &info->port_2_mac[0]); + break; + case 3: + ether_addr_copy(mac, &info->port_3_mac[0]); + break; + default: + return -1; + } + + return 0; +} + +int ne6x_dev_get_port_num(struct ne6x_pf *pf) +{ + return pf->sdk_spd_info.number_of_physical_controllers; +} + +int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_TEMPERATURE, (u32 *)temp, sizeof(*temp)); +} + +int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_POWER_CONSUM, (u32 *)power, sizeof(*power)); +} + +int ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_I2C3_TEST, (u32 *)id, sizeof(u32)); +} + +int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_FRU, buffer, size); +} + +int ne6x_dev_start_ddr_test(struct ne6x_pf *pf) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_DDR_TEST, NULL, 0); +} + +int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size) +{ + return ne6x_reg_e2prom_read(adpt->back, offset, pbuf, size); +} + +int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size) +{ + return ne6x_reg_e2prom_write(adpt->back, offset, pbuf, size); +} + +int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status) +{ + u32 link_speed = ne6x_reg_apb_read(adpt->back, 0x2087FB00 + 4 * ADPT_LPORT(adpt)); + + status->link = link_speed >> 16; + status->speed = link_speed & 0xffff; + + return 0; +} + +int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status) +{ + u32 sfp_state; + + sfp_state = ne6x_reg_apb_read(adpt->back, 0x2087FB40 + 4 * ADPT_LPORT(adpt)); + *status = sfp_state & 0x1; + + return 0; +} + +void ne6x_dev_update_status(struct ne6x_pf *pf, struct ne6x_port_info *port, bool is_up) +{ + u32 speed = NE6X_LINK_SPEED_25GB; + struct ne6x_phy_info *phy = &port->phy; + struct ne6x_link_status *link = &phy->link_info; + + if (!is_up) { + link->phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + link->link_speed = speed; + link->link_info &= ~NE6X_AQ_LINK_UP; + phy->media_type = NE6X_MEDIA_UNKNOWN; + return; + } + + link->link_info |= NE6X_AQ_LINK_UP; + switch (speed) { + case NE6X_LINK_SPEED_10GB: + link->phy_type_low = NE6X_PHY_TYPE_10GBASE; + link->link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + link->phy_type_low = 
NE6X_PHY_TYPE_25GBASE; + link->link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + link->phy_type_low = NE6X_PHY_TYPE_40GBASE; + link->link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + link->phy_type_low = NE6X_PHY_TYPE_100GBASE; + link->link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + link->phy_type_low = NE6X_PHY_TYPE_200GBASE; + link->link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_warn(ne6x_pf_to_dev(pf), "Unrecognized link_speed (0x%x).\n", speed); + break; + } + + phy->media_type = NE6X_MEDIA_FIBER; +} + +int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LINK_STATUS, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)verify, sizeof(int)); +} + +int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt) +{ + return ne6x_reg_reset_firmware(adpt->back); +} + +int ne6x_dev_get_speed(struct ne6x_adapter *adpt, u32 *speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)speed, sizeof(u32)); +} + +int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&speed, sizeof(u32)); +} + +int ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)fctrl, sizeof(fctrl)); +} + +int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)fctrl, sizeof(*fctrl)); +} + +int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)&adpt->stats, sizeof(adpt->stats)); +} + +int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu) +{ + u32 max_length = mtu + 18; + + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length)); +} + +int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu) +{ + u32 max_length; + int ret; + + ret = ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length)); + *mtu = max_length - 18; + + return ret; +} + +static int fastlog2(int x) +{ + int idx; + + for (idx = 31; idx >= 0; idx--) { + if (x & (1 << idx)) + break; + } + + return idx; +} + +int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *cfg) +{ + struct rss_table rss; + u32 *rss_data = (u32 *)&rss; + int ret, i; + + memset(&rss, 0x00, sizeof(rss)); + rss.flag = cpu_to_be32(0x01); /* valid bit */ + rss.hash_fun = (cfg->hash_func << 24) & 0xFF000000; + rss.hash_fun |= (cfg->hash_type & 0xFFFFFF); + rss.hash_fun = cpu_to_be32(rss.hash_fun); + rss.queue_base = cpu_to_be32(ADPT_VPORTCOS(adpt)); + rss.queue_def = cpu_to_be16(0x0); + rss.queue_size = cpu_to_be16(adpt->num_queue); + rss.entry_num = fastlog2(cfg->ind_table_size); + rss.entry_num = cpu_to_be16(rss.entry_num); + rss.entry_size = cpu_to_be16(0x0); + + for (i = 0; i < cfg->ind_table_size; i++) + rss.entry_data[i] = cfg->ind_table[i]; + + ext_toeplitz_key(&cfg->hash_key[0], &rss.hash_key[0]); + + for (i = 0; i < 128; i++) + rss_data[i] = cpu_to_be32(rss_data[i]); + + ret = ne6x_reg_table_write(adpt->back, NE6X_REG_RSS_TABLE, 
ADPT_VPORT(adpt), + (void *)&rss, sizeof(rss)); + return ret; +} + +int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags) +{ + int ret; + + clear_bit(NE6X_LINK_POOLING, adpt->back->state); + ret = ne6x_reg_upgrade_firmware(adpt->back, region, data, size); + set_bit(NE6X_LINK_POOLING, adpt->back->state); + + return ret; +} + +int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_TYPE_LEN, NE6X_TALK_GET, + ADPT_LPORT(adpt), sfp_mode, sizeof(*sfp_mode)); +} + +int ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags) +{ + return ne6x_reg_get_sfp_eeprom(adpt->back, ADPT_LPORT(adpt), data, offset, size); +} + +int ne6x_dev_clear_stats(struct ne6x_adapter *adpt) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_SET, + ADPT_LPORT(adpt), NULL, 0); +} + +/* panel port mapped to logical port */ +void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt) +{ + u32 val = (ADPT_LPORT(adpt) << 24) | (ADPT_VPORT(adpt) << 16) | + (adpt->port_info->hw_queue_base + 160); + + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT2PI_0 + ADPT_PPORT(adpt)), val); +} + +/* logical port mapped to panel port */ +void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt) +{ + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI2PORT_0 + ADPT_LPORT(adpt)), + ADPT_PPORT(adpt)); +} + +/* clear vport map */ +void ne6x_dev_clear_vport(struct ne6x_pf *pf) +{ + int idx; + + for (idx = 0; idx < 32; idx++) + ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_2_COS_0 + idx), 0); + + for (idx = 0; idx < 64; idx++) + ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_OLFLAGS_0 + idx), 0); +} + +/* automatically generating vp_base_cos */ +int ne6x_dev_set_vport(struct ne6x_adapter *adpt) +{ + u16 port = adpt->vport >> 1; + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), &val); + + /* pf base cos */ + if (adpt->vport & 0x1) { + val &= 0xFFFF; + val |= ((adpt->port_info->hw_queue_base + 160) << 16); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val); + } else { + val &= 0xFFFF0000; + val |= (adpt->port_info->hw_queue_base + 160); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val); + } + + return 0; +} + +int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp) +{ + pbmp_t new_pbmp; + int ret; + + PBMP_CLEAR(new_pbmp); + ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, + ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF), + (void *)new_pbmp, + sizeof(pbmp_t)); + + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + return ret; +} + +int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp) +{ + pbmp_t new_pbmp; + + PBMP_CLEAR(new_pbmp); + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + return ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, + ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF), + (void *)new_pbmp, sizeof(pbmp_t)); +} + +int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan) +{ + pbmp_t pbmp, new_pbmp; + u16 index = 0; + + 
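/*
 * Minimal sketch of the word reversal done by the VLAN port-bitmap
 * helpers here: the hardware table stores the four 32-bit words of the
 * 128-bit bitmap in reverse order, so the driver swaps words 0<->3 and
 * 1<->2 after reading, sets or clears the vport bit in host order, then
 * swaps back before writing.  Helper names below are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pbmp_t[4];		/* 128-bit port bitmap */

static void pbmp_swap_words(pbmp_t dst, const pbmp_t src)
{
	dst[0] = src[3];
	dst[1] = src[2];
	dst[2] = src[1];
	dst[3] = src[0];
}

static void pbmp_port_add(pbmp_t bmp, unsigned int vport)
{
	bmp[vport / 32] |= 1u << (vport % 32);
}

int main(void)
{
	pbmp_t hw = {0}, host = {0};

	pbmp_swap_words(host, hw);	/* hardware order -> host order  */
	pbmp_port_add(host, 37);	/* set bit for vport 37 (word 1) */
	pbmp_swap_words(hw, host);	/* host order -> hardware order  */

	printf("hw words: %08x %08x %08x %08x\n", hw[0], hw[1], hw[2], hw[3]);
	return 0;
}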
if (vlan->tpid == ETH_P_8021Q) + index = ADPT_LPORT(adpt) * 4096; + else if (vlan->tpid == ETH_P_8021AD) + index = 4 * 4096 + ADPT_LPORT(adpt) * 4096; + + memset(pbmp, 0, sizeof(pbmp_t)); + memset(new_pbmp, 0, sizeof(pbmp_t)); + + ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + memset(new_pbmp, 0, sizeof(pbmp)); + + PBMP_PORT_ADD(pbmp, adpt->vport); + + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + return 0; +} + +int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan) +{ + pbmp_t pbmp, new_pbmp; + u16 index = 0; + + if (vlan->tpid == ETH_P_8021Q) + index = ADPT_LPORT(adpt) * 4096; + else if (vlan->tpid == ETH_P_8021AD) + index = 4 * 4096 + ADPT_LPORT(adpt) * 4096; + + memset(pbmp, 0, sizeof(pbmp)); + memset(new_pbmp, 0, sizeof(pbmp)); + + ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + memset(new_pbmp, 0, sizeof(pbmp)); + + PBMP_PORT_REMOVE(pbmp, adpt->vport); + + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + return 0; +} + +/* clear vlan table */ +int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf) +{ + pbmp_t pbmp; + int index; + + PBMP_CLEAR(pbmp); + for (index = 0; index < 8192; index++) + ne6x_reg_table_write(pf, NE6X_REG_VLAN_TABLE, index, (void *)pbmp, sizeof(pbmp)); + + return 0; +} + +/* port add qinq */ +int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid) +{ + struct ne6x_vf_vlan vlan; + u32 val = 0; + + memset(&vlan, 0, sizeof(vlan)); + + vlan.tpid = proto; + vlan.vid = vid; + + memcpy(&val, &vlan, sizeof(u32)); + ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), val); + + return 0; +} + +/* port del qinq */ +int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid) +{ + ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), 0); + + return 0; +} + +int ne6x_dev_set_uc_promiscuous_enable(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + if (enable) + val |= NE6X_F_PROMISC; + else + val &= ~NE6X_F_PROMISC; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_set_mc_promiscuous_enable(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, 
NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + if (enable) + val |= NE6X_F_RX_ALLMULTI; + else + val &= ~NE6X_F_RX_ALLMULTI; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +static void ne6x_dev_update_uc_leaf(struct l2fdb_dest_unicast *unicast, struct ne6x_adapter *adpt, + bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? SET_BIT(unicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(unicast->vp_bmp[vport / 32], vport % 32); + + unicast->cnt = 0; +} + +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_fast_table db; + + memcpy(&db.mac[0], mac, 6); + db.start_cos = ADPT_VPORTCOS(adpt); + db.cos_num = adpt->num_queue; + + to_be32_vector(0, sizeof(db) / 4, &db); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + if (adpt->back->is_fastmode) + ne6x_dev_add_unicast_for_fastmode(adpt, mac); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 16, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + db.vlanid = 0; + + memset(&db.fw_info.unicast, 0, sizeof(db.fw_info.unicast)); + db.fw_info.unicast.flags = 0x1; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert unicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = res.fw_info.unicast.vp_bmp[1]; + db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +static int ne6x_dev_del_unicast_for_fastmode(struct ne6x_adapter *adpt) +{ + struct l2fdb_fast_table db; + + memset(&db, 0, sizeof(db)); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret = 0; + + if (adpt->back->is_fastmode) + ne6x_dev_del_unicast_for_fastmode(adpt); + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = res.fw_info.unicast.vp_bmp[1]; + 
db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return 0; +} + +static void ne6x_dev_update_mc_leaf(struct l2fdb_dest_multicast *multicast, + struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? SET_BIT(multicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(multicast->vp_bmp[vport / 32], vport % 32); +} + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + + memset(&db.fw_info.multicast, 0, sizeof(db.fw_info.multicast)); + db.fw_info.multicast.flags = 0x3; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert multicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret; + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + /* hash_key */ + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + /* mac info */ + ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return ret; +} + +inline void ne6x_dev_update_boradcast_leaf(u32 *leaf, struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? 
SET_BIT(*leaf, vport % 32) : CLR_BIT(*leaf, vport % 32); +} + +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, true); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, false); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt) +{ + int val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + return val; +} + +int ne6x_dev_set_features(struct ne6x_adapter *adpt, u32 val) +{ + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (enable) + val |= NE6X_F_RSS; + else + val &= ~NE6X_F_RSS; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&fec, sizeof(int)); +} + +int ne6x_dev_set_mac_inloop(struct ne6x_adapter *adpt, int enable) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LOOPBACK, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&enable, sizeof(int)); +} + +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)fec, sizeof(int)); +} + +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&speed, sizeof(u32)); +} + +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)speed, sizeof(u32)); +} + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&state, sizeof(u32)); +} + +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)state, sizeof(u32)); +} + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag) +{ + return ne6x_reg_nic_stop(pf, flag); +} + +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag) +{ + return ne6x_reg_nic_start(pf, flag); +} + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state) +{ + return ne6x_reg_set_led(adpt->back, ADPT_LPORT(adpt), state); +} + +void ne6x_dev_transform_vf_stat_format(u32 *stat_arr, struct vf_stat *stat) +{ + u32 start_pos = 0; + + stat->rx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos 
+= 2; + stat->rx_drop_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 16; + stat->tx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; +} + +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat) +{ + u32 stat_arr[64]; + int ret; + + ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); + ne6x_dev_transform_vf_stat_format(stat_arr, stat); + + return ret; +} + +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt) +{ + u32 stat_arr[64] = {0}; + + return ne6x_reg_table_write(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); +} + +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed) +{ + switch (adpt->back->dev_type) { + case NE6000AI_2S_X16H_25G_N5: + case NE6000AI_2S_X16H_25G_N6: + if (speed == SPEED_25000 || speed == SPEED_10000) + return 0; + + return -EOPNOTSUPP; + case NE6000AI_2S_X16H_100G_N5: + if (speed == SPEED_40000 || speed == SPEED_100000) + return 0; + + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (state) + val |= NE6X_F_RX_FW_LLDP; + else + val &= ~NE6X_F_RX_FW_LLDP; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +#define NE6X_METER_STEP 152 +#define NE6X_DF_METER_CBS_PBS (100 * 152) +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate) +{ + u32 val = 0, ret = 0; + u32 cir = 0, cbs = 0; + struct meter_table vf_bw; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + memset(&vf_bw, 0, sizeof(struct meter_table)); + + if (tx_rate) + val |= NE6X_F_TX_QOSBANDWIDTH; + else + val &= ~NE6X_F_TX_QOSBANDWIDTH; + + if (tx_rate) { + cir = tx_rate; + cbs = 0xffffff; + vf_bw.pbs = cbs; + vf_bw.cir = cir; + vf_bw.cbs = cbs; + vf_bw.pir = cir; + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + } else { + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + } + + return ret; +} + +static int ne6x_dev_reg_pattern_test(struct ne6x_pf *pf, u32 reg, u32 val_arg) +{ + struct device *dev; + u32 val, orig_val; + + orig_val = ne6x_reg_apb_read(pf, reg); + dev = ne6x_pf_to_dev(pf); + + ne6x_reg_apb_write(pf, reg, val_arg); + val = ne6x_reg_apb_read(pf, reg); + if (val != 
val_arg) { + dev_err(dev, "%s: reg pattern test failed - reg 0x%08x val 0x%08x\n", + __func__, reg, val); + return -1; + } + + ne6x_reg_apb_write(pf, reg, orig_val); + val = ne6x_reg_apb_read(pf, reg); + if (val != orig_val) { + dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n", + __func__, reg, orig_val, val); + return -1; + } + + return 0; +} + +#define NE6X_TEST_INT_SET_VALUE 0x1000000000000000 /* bit 60 */ +int ne6x_dev_test_intr(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + union ne6x_vp_int vp_int; + int ret = -1; + + if (base < NE6X_PF_VP0_NUM) { + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT), vp_int.val); + } + } else { + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT), vp_int.val); + } + } + + return ret; +} + +int ne6x_dev_test_reg(struct ne6x_adapter *adpt) +{ + struct ne6x_diag_reg_info test_reg[4] = { + {0x20a00180, 0x5A5A5A5A}, + {0x20a00180, 0xA5A5A5A5}, + {0x20a00188, 0x00000000}, + {0x20a0018c, 0xFFFFFFFF} + }; + u32 value, reg; + int index; + + netdev_dbg(adpt->netdev, "Register test\n"); + for (index = 0; index < ARRAY_SIZE(test_reg); ++index) { + value = test_reg[index].value; + reg = test_reg[index].address; + + /* bail on failure (non-zero return) */ + if (ne6x_dev_reg_pattern_test(adpt->back, reg, value)) + return 1; + } + + return 0; +} + +#define NE6X_LOOP_TEST_TYPE 0x1234 +/* handle hook packet */ +int ne6x_dev_proto_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *ndev) +{ + struct ne6x_netdev_priv *np = netdev_priv(dev); + struct ne6x_adapter *adpt = np->adpt; + + netdev_info(dev, "recv loopback test packet success!\n"); + adpt->recv_done = true; + + kfree_skb(skb); + wake_up(&adpt->recv_notify); + + return 0; +} + +static u8 loop_dst_mac[8] = {0x00, 0x00, 0x00, 0x11, 0x11, 0x01}; +int ne6x_dev_proto_send(struct net_device *netdev, char *buf, int len) +{ + struct sk_buff *skb; + u8 *pdata = NULL; + u32 skb_len; + + skb_len = LL_RESERVED_SPACE(netdev) + len; + skb = dev_alloc_skb(skb_len); + if (!skb) + return -1; + + skb_reserve(skb, LL_RESERVED_SPACE(netdev)); + skb->dev = netdev; + skb->ip_summed = CHECKSUM_NONE; + skb->priority = 0; + pdata = skb_put(skb, len); + if (pdata) + memcpy(pdata, buf, len); + + /* send loop test packet */ + if (dev_queue_xmit(skb) < 0) { + dev_put(netdev); + kfree_skb(skb); + netdev_err(netdev, "send pkt fail.\n"); + return -1; + } + netdev_info(netdev, "send loopback test packet success!\n"); + + return 0; +} + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt) +{ + struct packet_type prot_hook; + struct ethhdr *ether_hdr; + u32 old_value; + int ret = 0; + + adpt->send_buffer = kzalloc(2048, GFP_KERNEL); + if (!adpt->send_buffer) + return -ENOMEM; + + 
+	/* config mac/pcs loopback */
+	if (ne6x_dev_set_mac_inloop(adpt, true)) {
+		netdev_err(adpt->netdev, "loopback test set_mac_inloop fail!\n");
+		kfree(adpt->send_buffer);
+		adpt->send_buffer = NULL;
+		return -1;
+	}
+
+	old_value = ne6x_dev_get_features(adpt);
+	ne6x_dev_set_uc_promiscuous_enable(adpt, true);
+	memset(&prot_hook, 0, sizeof(struct packet_type));
+	prot_hook.type = cpu_to_be16(NE6X_LOOP_TEST_TYPE);
+	prot_hook.dev = adpt->netdev;
+	prot_hook.func = ne6x_dev_proto_recv;
+	dev_add_pack(&prot_hook);
+	ether_hdr = (struct ethhdr *)adpt->send_buffer;
+	memcpy(ether_hdr->h_source, &adpt->port_info->mac.perm_addr[0], ETH_ALEN);
+	memcpy(ether_hdr->h_dest, loop_dst_mac, ETH_ALEN);
+	ether_hdr->h_proto = cpu_to_be16(NE6X_LOOP_TEST_TYPE);
+	adpt->send_buffer[14] = 0x45;
+	ne6x_dev_proto_send(adpt->netdev, adpt->send_buffer, 1024);
+
+	if (wait_event_interruptible_timeout(adpt->recv_notify, !!adpt->recv_done,
+					     msecs_to_jiffies(2000)) <= 0) {
+		netdev_info(adpt->netdev, "loopback test fail!\n");
+		ret = -1;
+	}
+
+	adpt->recv_done = false;
+	kfree(adpt->send_buffer);
+	adpt->send_buffer = NULL;
+	/* restore promisc */
+	ne6x_dev_set_features(adpt, old_value);
+	dev_remove_pack(&prot_hook);
+	if (ne6x_dev_set_mac_inloop(adpt, false)) {
+		netdev_err(adpt->netdev, "loopback test cancel_mac_inloop fail\n");
+		return -1;
+	}
+
+	return ret;
+}
+
+int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data)
+{
+	u8 mac_info[8] = {0};
+
+	memcpy(mac_info, data, 6);
+
+	return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_INFO, NE6X_TALK_SET, ADPT_LPORT(adpt),
+				  (void *)mac_info, sizeof(mac_info));
+}
+
+static u32 crc_table[CRC32_TABLE_SIZE]; /* 1KB */
+static void ne6x_dev_crc32_for_fw_init(void)
+{
+	u32 remainder;
+	u32 dividend;
+	s32 bit;
+
+	for (dividend = 0U; dividend < CRC32_TABLE_SIZE; ++dividend) {
+		remainder = dividend;
+		for (bit = 8; bit > 0; --bit) {
+			if ((remainder & 1U) != 0)
+				remainder = (remainder >> 1) ^ CRC32_REVERSED_POLYNOMIAL;
+			else
+				remainder >>= 1;
+		}
+
+		crc_table[dividend] = remainder;
+	}
+}
+
+static u32 ne6x_dev_crc32_for_fw(const void *message, u32 bytes)
+{
+	const u8 *buffer = (const u8 *)message;
+	u32 remainder = CRC32_INITIAL_REMAINDER;
+	u8 idx;
+
+	ne6x_dev_crc32_for_fw_init();
+
+	while (bytes-- > 0) {
+		idx = (u8)(*buffer++ ^ remainder);
+		remainder = crc_table[idx] ^ (remainder >> 8);
+	}
+
+	return remainder ^ CRC32_FINALIZE_REMAINDER;
+}
+
+static int ne6x_dev_get_fw_region(const u8 *data, u32 size, int *region)
+{
+	if (size < NE6X_FW_SIG_LENGTH)
+		return NE6X_FW_NOT_SUPPORT;
+
+	if (!memcmp(data, NE6X_FW_810_APP_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_810_APP;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_NP_APP_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_NP;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_PXE_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_PXE;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_810_LDR_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_810_LOADER;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_FRU_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FRU;
+		return 0;
+	} else if (!memcmp(data, NE6X_FW_807_APP_SIG, NE6X_FW_SIG_LENGTH)) {
+		*region = NE6X_ETHTOOL_FLASH_807_APP;
+		return 0;
+	} else {
+		return NE6X_FW_NOT_SUPPORT;
+	}
+}
+
+static int ne6x_dev_check_fw(const u8 *data, const u32 size, const int region)
+{
+	struct ne6x_fw_common_header *comm_hdr;
+	struct ne6x_fw_np_header *np_hdr;
+	u32 hcrc, pcrc, crc;
+
+	switch (region) {
+	case NE6X_ETHTOOL_FLASH_810_APP:
+	case NE6X_ETHTOOL_FLASH_PXE:
+	case
NE6X_ETHTOOL_FLASH_810_LOADER: + case NE6X_ETHTOOL_FLASH_807_APP: + comm_hdr = (struct ne6x_fw_common_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = comm_hdr->header_crc; + pcrc = comm_hdr->package_crc; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*comm_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (comm_hdr->length != size) + return NE6X_FW_LENGTH_ERR; + + comm_hdr->package_crc = CRC32_INITIAL_REMAINDER; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, comm_hdr->length); + comm_hdr->package_crc = pcrc; + comm_hdr->header_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + case NE6X_ETHTOOL_FLASH_NP: + np_hdr = (struct ne6x_fw_np_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = np_hdr->hdr_crc; + pcrc = np_hdr->pkg_crc; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*np_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (np_hdr->img_length != size) + return NE6X_FW_LENGTH_ERR; + + np_hdr->pkg_crc = CRC32_INITIAL_REMAINDER; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, np_hdr->img_length); + np_hdr->pkg_crc = pcrc; + np_hdr->hdr_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + } + + return 0; +} + +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region) +{ + if (ne6x_dev_get_fw_region(data, size, region)) + return NE6X_FW_NOT_SUPPORT; + + return ne6x_dev_check_fw(data, size, *region); +} + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state) +{ + u32 value = ne6x_dev_get_features(adpt); + + if (tx_state) + value &= ~NE6X_F_TX_DISABLE; + else + value |= NE6X_F_TX_DISABLE; + + if (rx_state) + value &= ~NE6X_F_RX_DISABLE; + else + value |= NE6X_F_RX_DISABLE; + + ne6x_dev_set_features(adpt, value); + + return 0; +} + +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 number_queue) +{ + u32 mode; + + if (is_fast_mode) { + mode = pf->num_alloc_vfs; + mode |= 1 << 16; + pf->is_fastmode = true; + } else { + mode = 0; + pf->is_fastmode = false; + } + + return ne6x_reg_set_user_data(pf, NP_USER_DATA_FAST_MODE, mode); +} + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size) +{ + return ne6x_reg_get_dump_data_len(pf, size); +} + +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size) +{ + return ne6x_reg_get_dump_data(pf, data, size); +} + +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect) +{ + return ne6x_reg_set_norflash_write_protect(pf, write_protect); +} + +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + return ne6x_reg_get_norflash_write_protect(pf, p_write_protect); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h new file mode 100644 index 000000000000..02d896596236 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_DEV_H +#define _NE6X_DEV_H + +#include "ne6x_portmap.h" + +#define NE6000AI_2S_X16H_100G_N5 0xA050 +#define NE6000AI_2S_X16H_25G_N5 0xA030 +#define NE6000AI_2S_X16H_25G_N6 0xA031 + +#define NE6000_IF_INTERFACE_UP 1 +#define NE6000_IF_INTERFACE_DOWN 0 + +struct ne6x_flowctrl { + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; + +struct ne6x_sfp_mod_type_len { + u32 type; + u32 len; +}; + +enum { + NE6X_SOC_TEMPERATURE = 0x0, + NE6X_SOC_POWER_CONSUM, + NE6X_SOC_DDR_TEST, + NE6X_SOC_FRU, + NE6X_SOC_SERDES_SEND_BIT, + NE6X_SOC_I2C3_TEST, +}; + +struct ne6x_soc_temperature { + u32 chip_temerature; + u32 board_temperature; +}; + +struct ne6x_soc_power { + u32 cur; + u32 vol; + u32 power; +}; + +#define NE6X_FW_SIG_OFFSET 0x0 + +#define NE6X_FW_SIG_LENGTH 8 + +#define NE6X_FW_810_LDR_SIG "NE6K810L" +#define NE6X_FW_810_APP_SIG "NE6K810A" +#define NE6X_FW_807_APP_SIG "NE6K807A" +#define NE6X_FW_803_APP_SIG "NE6K803A" +#define NE6X_FW_803_LDR_SIG "NE6K803L" +#define NE6X_FW_NP_APP_SIG "NE6KNPV1" +#define NE6X_FW_TBL_SIG "NE6KTBL*" +#define NE6X_FW_PXE_SIG "NE6KPXE*" +#define NE6X_FW_FRU_SIG "NE6KFRU*" + +struct ne6x_fw_common_header { + u8 signature[NE6X_FW_SIG_LENGTH]; + u32 version; + u32 length; + u32 sections; + u32 sect_start_addr; + u32 type; + u32 build_date; + u8 reserved[16]; + u8 fw_ver[8]; + u32 package_crc; + u32 header_crc; +}; /* 64B */ + +struct ne6x_fw_np_iwidth { + char sig[4]; + u16 width; + u16 ocp; +}; /* 8B */ + +struct ne6x_fw_np_isad { + char sig[4]; + u32 isa_id; + + struct ne6x_fw_np_iwidth fp; + struct ne6x_fw_np_iwidth dp; + struct ne6x_fw_np_iwidth rp; +}; /* 32B */ + +struct ne6x_fw_np_atd { + char sig[4]; + u32 at_id; + + struct ne6x_fw_np_iwidth te; +}; /* 16B */ + +struct ne6x_fw_np_header { + char signature[NE6X_FW_SIG_LENGTH]; + u32 hdr_version; + u32 hdr_length; + + u32 rsvd; + u32 build_date; + u32 img_version; + u32 img_length; + + u32 npc_cnt; + u32 npc_offset; + u32 isa_cnt; + u32 isa_offset; + + u32 at_cnt; + u32 at_offset; + u32 atd_cnt; + u32 atd_offset; + + struct ne6x_fw_np_isad ISA[1]; + + struct ne6x_fw_np_atd ATD[1]; + + u32 cipher; /* For future use */ + u32 comp; /* For future use */ + u32 pkg_crc; + u32 hdr_crc; +}; /* 128 B */ + +#define CRC32_REVERSED_POLYNOMIAL 0xEDB88320U +#define CRC32_INITIAL_REMAINDER 0xFFFFFFFFU +#define CRC32_FINALIZE_REMAINDER 0xFFFFFFFFU +#define CRC32_TABLE_SIZE 256U + +enum { + NE6X_FW_NOT_SUPPORT = -1, + NE6X_FW_HEADER_CRC_ERR = -2, + NE6X_FW_LENGTH_ERR = -3, + NE6X_FW_PKG_CRC_ERR = -4, +}; + +struct ne6x_key_filter { + struct list_head list; + struct ne6x_key key; + struct { + u8 is_new_key : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +struct ne6x_vlan_filter { + struct list_head list; + struct ne6x_vlan vlan; + struct { + u8 is_new_vlan : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +enum { + NE6X_METER_SUBSET0 = 0x0, + NE6X_METER_SUBSET1, + NE6X_METER_SUBSET2, + NE6X_METER_SUBSET3, + NE6X_METER_SUBSET4, + NE6X_METER_SUBSET5, + NE6X_METER_SUBSET6, + NE6X_METER_SUBSET7, + NE6X_METER_SUBSET8, + NE6X_METER_SUBSET9, + NE6X_METER_SUBSET10, + NE6X_METER_SUBSET11, + NE6X_METER_SUBSET12, + NE6X_METER_SUBSET13, + NE6X_METER_SUBSET14, + NE6X_METER_SUBSET15, +}; + +#define NE6X_METER0_TABLE 0x00000000U +#define NE6X_METER1_TABLE 0x80000000U 
+#define NE6X_METER_SUBSET(n) (((n) & 0xf) << 27) + +struct vf_stat { + u64 rx_drop_pkts; + u64 rx_broadcast_pkts; + u64 rx_multicast_pkts; + u64 rx_unicast_pkts; + u64 tx_broadcast_pkts; + u64 tx_multicast_pkts; + u64 tx_unicast_pkts; + u64 rx_malform_pkts; + u64 tx_malform_pkts; +}; + +enum ne6x_fec_state { + NE6X_FEC_NONE, + NE6X_FEC_RS, + NE6X_FEC_BASER, + NE6X_FEC_AUTO, +}; + +int ne6x_dev_init(struct ne6x_pf *pf); +int ne6x_dev_get_port_num(struct ne6x_pf *pf); +int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt); +int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status); +int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed); + +int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt); + +int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify); + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt); +int ne6x_dev_set_features(struct ne6x_adapter *adpt, u32 value); + +int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu); +int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu); + +void ne6x_dev_clear_vport(struct ne6x_pf *pf); +void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt); +void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt); +int ne6x_dev_set_vport(struct ne6x_adapter *adpt); + +int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf); + +int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *info); + +int ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); +int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); + +int ne6x_dev_clear_stats(struct ne6x_adapter *adpt); + +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_set_uc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_set_mc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec); +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec); + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_read_qsfp(struct ne6x_adapter *adpt, u8 regaddr, u8 *data, int len); + +int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags); + +int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode); + +int 
ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags); + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp); +int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power); +int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size); +int ne6x_dev_start_ddr_test(struct ne6x_pf *pf); +int ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id); + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state); +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state); + +int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status); + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state); +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat); +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt); +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed); + +int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 index, + u32 *data, int size); + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state); + +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate); + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt); +int ne6x_dev_test_reg(struct ne6x_adapter *adpt); +int ne6x_dev_test_intr(struct ne6x_adapter *adpt); +int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data); +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region); + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state); +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 num_queue); +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size); +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size); +int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable); +void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable); +int ne6x_dev_get_pport(struct ne6x_adapter *adpt); +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect); +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect); + +u32 ne6x_dev_crc32(const u8 *buf, u32 size); +void ne6x_dev_set_trust_vlan(struct ne6x_pf *pf, bool enable); +bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c new file mode 100644 index 000000000000..063b734f238f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c @@ -0,0 +1,1623 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include +#include "version.h" + +static const char ne6x_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test ", "Loopback test ", "Register test ", "Interrupt test" +}; + +#define NE6X_TEST_LEN (sizeof(ne6x_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6x_q_stats_len(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6x_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. + */ +#define NE6X_NETDEV_STAT(_net_stat) NE6X_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_stats[] = { + NE6X_NETDEV_STAT(rx_packets), + NE6X_NETDEV_STAT(tx_packets), + NE6X_NETDEV_STAT(rx_bytes), + NE6X_NETDEV_STAT(tx_bytes), + NE6X_NETDEV_STAT(rx_errors), + NE6X_NETDEV_STAT(tx_errors), + NE6X_NETDEV_STAT(rx_dropped), + NE6X_NETDEV_STAT(tx_dropped), + NE6X_NETDEV_STAT(collisions), + NE6X_NETDEV_STAT(rx_length_errors), + NE6X_NETDEV_STAT(rx_crc_errors), +}; + +#define NE6X_DEVICE_ETH_STAT(_dev_eth_stat) NE6X_STAT(struct ne6x_eth_stats, \ + #_dev_eth_stat, _dev_eth_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_dev_eth_stats[] = { + NE6X_DEVICE_ETH_STAT(rx_unicast), + NE6X_DEVICE_ETH_STAT(rx_multicast), + NE6X_DEVICE_ETH_STAT(rx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_discards), + NE6X_DEVICE_ETH_STAT(rx_miss), + NE6X_DEVICE_ETH_STAT(tx_unicast), + NE6X_DEVICE_ETH_STAT(tx_multicast), + NE6X_DEVICE_ETH_STAT(tx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_malform), + NE6X_DEVICE_ETH_STAT(tx_malform), +}; + +#define NE6X_PF_STAT(_name, _stat) NE6X_STAT(struct ne6x_pf, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_pf_stats[] = { + NE6X_PF_STAT("tx_timeout", tx_timeout_count), +}; + +/* per-queue ring statistics */ +#define NE6X_QUEUE_STAT(_name, _stat) NE6X_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_tx_queue_stats[] = { + NE6X_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6X_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6X_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6X_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6X_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6X_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6X_QUEUE_STAT("tx_queue_%u_ecc_err", tx_stats.tx_ecc_err), + NE6X_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6x_stats ne6x_gstrings_rx_queue_stats[] = { + NE6X_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_pg_err", 
rx_stats.alloc_page_failed), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6X_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6X_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6X_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6X_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6X_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6x_stats ne6x_gstrings_cq_queue_stats[] = { + NE6X_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + NE6X_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6X_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6X_PORT_MAC_STAT(_name, _stat) NE6X_STAT(struct ne6x_adapter, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_port_mac_stats[] = { + NE6X_PORT_MAC_STAT("port.rx_eth_byte", stats.mac_rx_eth_byte), + NE6X_PORT_MAC_STAT("port.rx_eth", stats.mac_rx_eth), + NE6X_PORT_MAC_STAT("port.rx_eth_undersize", stats.mac_rx_eth_undersize), + NE6X_PORT_MAC_STAT("port.rx_eth_crc_err", stats.mac_rx_eth_crc), + NE6X_PORT_MAC_STAT("port.rx_eth_64b", stats.mac_rx_eth_64b), + NE6X_PORT_MAC_STAT("port.rx_eth_65_127b", stats.mac_rx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.rx_eth_128_255b", stats.mac_rx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.rx_eth_256_511b", stats.mac_rx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.rx_eth_512_1023b", stats.mac_rx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.rx_eth_1024_15360b", stats.mac_rx_eth_1024_15360b), + NE6X_PORT_MAC_STAT("port.tx_eth_byte", stats.mac_tx_eth_byte), + NE6X_PORT_MAC_STAT("port.tx_eth", stats.mac_tx_eth), + NE6X_PORT_MAC_STAT("port.tx_eth_undersize", stats.mac_tx_eth_undersize), + NE6X_PORT_MAC_STAT("port.tx_eth_64b", stats.mac_tx_eth_64b), + NE6X_PORT_MAC_STAT("port.tx_eth_65_127b", stats.mac_tx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.tx_eth_128_255b", stats.mac_tx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.tx_eth_256_511b", stats.mac_tx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.tx_eth_512_1023b", stats.mac_tx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.tx_eth_1024_15360b", stats.mac_tx_eth_1024_15360b), +}; + +#define NE6X_ADPT_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_stats) +#define NE6X_ADPT_DEV_ETH_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_dev_eth_stats) + +#define NE6X_PF_STATS_LEN ARRAY_SIZE(ne6x_gstrings_pf_stats) +#define NE6X_PORT_MAC_STATS_LEN ARRAY_SIZE(ne6x_gstrings_port_mac_stats) + +#define NE6X_ALL_STATS_LEN(n) \ + (NE6X_ADPT_STATS_LEN + NE6X_ADPT_DEV_ETH_STATS_LEN + \ + NE6X_PF_STATS_LEN + NE6X_PORT_MAC_STATS_LEN + ne6x_q_stats_len(n)) + +struct ne6x_priv_flag { + char name[ETH_GSTRING_LEN]; + u32 bitno; /* bit position in pf->flags */ +}; + +#define NE6X_PRIV_FLAG(_name, _bitno) { \ + .name = _name, \ + .bitno = _bitno, \ +} + +static const struct ne6x_priv_flag ne6x_gstrings_priv_flags[] = { + NE6X_PRIV_FLAG("disable-fw-lldp", NE6X_ADPT_F_DISABLE_FW_LLDP), + NE6X_PRIV_FLAG("link-down-on-close", NE6X_ADPT_F_LINKDOWN_ON_CLOSE), + NE6X_PRIV_FLAG("write-protect", NE6X_ADPT_F_NORFLASH_WRITE_PROTECT), + NE6X_PRIV_FLAG("ddos-switch", NE6X_ADPT_F_DDOS_SWITCH), + NE6X_PRIV_FLAG("white-list", NE6X_ADPT_F_ACL), + NE6X_PRIV_FLAG("trust-vlan", NE6X_ADPT_F_TRUST_VLAN), +}; + +#define NE6X_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ne6x_gstrings_priv_flags) + +static void ne6x_get_settings_link_up_fec(struct net_device *netdev, + u32 link_speed, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + enum ne6x_fec_state 
fec = NE6X_FEC_NONE; + + switch (link_speed) { + case NE6X_LINK_SPEED_25GB: + case NE6X_LINK_SPEED_100GB: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + ne6x_dev_get_fec(adpt, &fec); + if (fec == NE6X_FEC_RS) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + else if (fec == NE6X_FEC_BASER) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + else + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + + break; + default: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + break; + } +} + +static void ne6x_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) +{ + struct ne6x_link_status *link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + link_info = &adpt->port_info->phy.link_info; + switch (link_info->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 100000baseCR4_Full); + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + link_info->link_speed); + break; + } + + ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & NE6X_AQ_AN_COMPLETED) + ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Autoneg); + + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + + ne6x_get_settings_link_up_fec(netdev, link_info->link_speed, ks); +} + +static void ne6x_phy_type_to_ethtool(struct ne6x_adapter *adpt, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); +} + +static void ne6x_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ne6x_phy_type_to_ethtool(adpt, ks); + /* With no link, speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +static int ne6x_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_link_status *hw_link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + hw_link_info = &adpt->port_info->phy.link_info; + + /* set speed and duplex */ + if (hw_link_info->link_info & NE6X_AQ_LINK_UP) + ne6x_get_settings_link_up(ks, netdev); + else + ne6x_get_settings_link_down(ks, netdev); + + if (!ne6x_dev_check_speed(adpt, SPEED_10000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); + + if 
(!ne6x_dev_check_speed(adpt, SPEED_25000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_100000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseCR4_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_40000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); + + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_FIBRE; + + /* Set flow control settings */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + return 0; +} + +static int ne6x_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + bool if_running = netif_running(netdev); + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + u32 master = (adpt->idx == 0); + char *speed = "Unknown "; + u32 link_speed; + u32 sfp_speed; + int ret; + + if (ne6x_dev_check_speed(adpt, ks->base.speed)) { + dev_info(&pf->pdev->dev, "speed not support\n"); + return -EOPNOTSUPP; + } + + if (!master && pf->dev_type == NE6000AI_2S_X16H_25G_N5) { + dev_info(&pf->pdev->dev, "only master port can change speed\n"); + return -EOPNOTSUPP; + } + + switch (ks->base.speed) { + case SPEED_100000: + link_speed = NE6X_LINK_SPEED_100GB; + break; + case SPEED_40000: + link_speed = NE6X_LINK_SPEED_40GB; + break; + case SPEED_25000: + link_speed = NE6X_LINK_SPEED_25GB; + break; + case SPEED_10000: + link_speed = NE6X_LINK_SPEED_10GB; + break; + default: + return -EOPNOTSUPP; + } + + ret = ne6x_dev_get_sfp_speed(adpt, &sfp_speed); + if (!ret) { + switch (sfp_speed) { + case NE6X_LINK_SPEED_40GB: + speed = "40 G"; + break; + case NE6X_LINK_SPEED_100GB: + speed = "100 G"; + break; + case NE6X_LINK_SPEED_10GB: + speed = "10 G"; + break; + case NE6X_LINK_SPEED_25GB: + speed = "25 G"; + break; + case NE6X_LINK_SPEED_200GB: + speed = "200 G"; + break; + default: + break; + } + + if (sfp_speed != link_speed) + netdev_info(adpt->netdev, "speed not match, sfp support%sbps Full Duplex\n", + speed); + } + + if (if_running) + ne6x_close(adpt->netdev); + + ret = ne6x_dev_set_speed(adpt, link_speed); + if (if_running) + ne6x_open(adpt->netdev); + + return ret; +} + +static void __ne6x_add_stat_strings(u8 **p, const struct ne6x_stats stats[], + const unsigned int size, + ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6x_add_stat_strings(p, stats, ...) 
\ + __ne6x_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6x_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + unsigned int i; + + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_dev_eth_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_pf_stats); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_add_stat_strings(&data, ne6x_gstrings_tx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_rx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_cq_queue_stats, i); + } + + ne6x_add_stat_strings(&data, ne6x_gstrings_port_mac_stats); +} + +static void ne6x_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + u8 *p = data; + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", ne6x_gstrings_priv_flags[i].name); + p += ETH_GSTRING_LEN; + } +} + +static void ne6x_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6x_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6x_gstrings_test, NE6X_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_PRIV_FLAGS: + ne6x_get_priv_flag_strings(netdev, data); + break; + default: + break; + } +} + +static int ne6x_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return NE6X_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6X_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return NE6X_PRIV_FLAG_ARRAY_SIZE; + default: + return -EOPNOTSUPP; + } +} + +static void ne6x_get_mac_stats(struct ne6x_adapter *adpt) +{ + ne6x_dev_get_mac_stats(adpt); +} + +static void ne6x_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + char *p; + + ne6x_update_pf_stats(adpt); + + for (j = 0; j < NE6X_ADPT_STATS_LEN; j++) { + p = (char *)ne6x_get_adpt_stats_struct(adpt) + + ne6x_gstrings_adpt_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_stats[j].sizeof_stat == sizeof(u64)) ? + *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_ADPT_DEV_ETH_STATS_LEN; j++) { + p = (char *)(&adpt->eth_stats) + + ne6x_gstrings_adpt_dev_eth_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_dev_eth_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_PF_STATS_LEN; j++) { + p = (char *)pf + ne6x_gstrings_pf_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_pf_stats[j].sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = READ_ONCE(adpt->tx_rings[j]); + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = READ_ONCE(adpt->rx_rings[j]); + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = READ_ONCE(adpt->cq_rings[j]); + if (cq_ring) { + data[i++] = cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + + rcu_read_unlock(); + + ne6x_get_mac_stats(adpt); + + for (j = 0; j < NE6X_PORT_MAC_STATS_LEN; j++) { + p = (char *)adpt + ne6x_gstrings_port_mac_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_port_mac_stats[j].sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +extern char ne6x_driver_name[]; + +static void ne6x_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + u32 soc_ver = 0, np_ver = 0, erom_ver = 0; + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + char nvm_version_str[32]; + char driver_name[32]; + char temp_str[16] = {0}; + + snprintf(driver_name, 32, "%s", ne6x_driver_name); + strscpy(drvinfo->driver, driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); + memset(nvm_version_str, 0, sizeof(nvm_version_str)); + soc_ver = pf->verinfo.firmware_soc_ver; + np_ver = pf->verinfo.firmware_np_ver & 0xFFFF; + erom_ver = pf->verinfo.firmware_pxe_ver & 0xFFFF; + snprintf(nvm_version_str, 20, "%d.%d.%d.%d ", (soc_ver & 0xff000000) >> 24, + ((erom_ver & 0xFFFF) / 100), ((soc_ver & 0xFFFF) / 100), + ((np_ver & 0xFFFF) / 100)); + if (erom_ver % 100) { + snprintf(temp_str, 4, "P%d", (erom_ver % 100)); + strncat(nvm_version_str, temp_str, 4); + } + if ((soc_ver & 0xffff) % 100) { + snprintf(temp_str, 4, "A%d", ((soc_ver & 0xffff) % 100)); + strncat(nvm_version_str, temp_str, 4); + } + if (np_ver % 100) { + snprintf(temp_str, 4, "N%d", (np_ver % 100)); + strncat(nvm_version_str, temp_str, 4); + } + strlcpy(drvinfo->fw_version, nvm_version_str, sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); +} + +static void ne6x_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + struct ne6x_hw *hw = &pf->hw; + unsigned int i, j, ri; + u32 *reg_buf = p; + u32 reg; + + regs->version = 1; + + /* loop through the diags reg table for what to print */ + ri = 0; + for (i = 0; ne6x_reg_list[i].offset != 0; i++) { + for (j = 0; j < ne6x_reg_list[i].elements; j++) { + reg = ne6x_reg_list[i].offset + (j * ne6x_reg_list[i].stride); + reg_buf[ri++] = rd64(hw, reg); + } + } +} + +static void ne6x_self_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, sizeof(*data) * NE6X_TEST_LEN); +} + +static int ne6x_get_regs_len(struct net_device *netdev) +{ + int reg_count = 0; + int i; + + for (i = 0; ne6x_reg_list[i].offset != 0; i++) + reg_count += ne6x_reg_list[i].elements; + + return reg_count * sizeof(u32); +} + +static void ne6x_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adpt->num_rx_desc; + ring->tx_pending = adpt->num_tx_desc; + ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_pending = 0; +} + +static int ne6x_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + u32 new_rx_count, new_tx_count, new_cq_count, new_tg_count; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int timeout = 50; + int err = 0; + int i; + + if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->tx_pending < NE6X_MIN_NUM_DESCRIPTORS || + ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->rx_pending < 
NE6X_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS, + NE6X_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_cq_count = new_tx_count + new_rx_count; + new_tg_count = new_tx_count; + + if (new_tx_count == adpt->num_tx_desc && new_rx_count == adpt->num_rx_desc) + return 0; + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + + usleep_range(1000, 2000); + } + + if (!netif_running(adpt->netdev)) { + adpt->num_tx_desc = new_tx_count; + adpt->num_rx_desc = new_rx_count; + adpt->num_cq_desc = new_cq_count; + adpt->num_tg_desc = new_tg_count; + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + goto done; + } + + err = ne6x_close(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to close adpt = %d\n", adpt->idx); + goto done; + } + + netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to [%d-%d]\n", + adpt->tx_rings[0]->count, adpt->rx_rings[0]->count, new_tx_count, new_rx_count); + + /* simple case - set for the next time the netdev is started */ + for (i = 0; i < adpt->num_queue; i++) { + adpt->tx_rings[i]->count = new_tx_count; + adpt->rx_rings[i]->count = new_rx_count; + adpt->cq_rings[i]->count = new_cq_count; + adpt->tg_rings[i]->count = new_tg_count; + } + + adpt->num_tx_desc = new_tx_count; + adpt->num_rx_desc = new_rx_count; + adpt->num_cq_desc = new_cq_count; + adpt->num_tg_desc = new_tg_count; + + err = ne6x_open(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to open adpt = %d\n", adpt->idx); + goto done; + } + +done: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return err; +} + +static void ne6x_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_flowctrl flowctrl; + int ret; + + ret = ne6x_dev_get_flowctrl(adpt, &flowctrl); + if (ret) + return; + + pause->autoneg = 0; + pause->rx_pause = flowctrl.rx_pause; + pause->tx_pause = flowctrl.tx_pause; +} + +static int ne6x_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_flowctrl flowctrl; + int ret; + + if (pause->autoneg) + return -EOPNOTSUPP; + + flowctrl.autoneg = pause->autoneg; + flowctrl.rx_pause = pause->rx_pause; + flowctrl.tx_pause = pause->tx_pause; + + ret = ne6x_dev_set_flowctrl(adpt, &flowctrl); + if (ret) + return ret; + + return 0; +} + +static int ne6x_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + ec->tx_max_coalesced_frames_irq = 256; + ec->rx_max_coalesced_frames_irq = 256; + ec->use_adaptive_rx_coalesce = 0; + ec->use_adaptive_tx_coalesce = 0; + ec->rx_coalesce_usecs = 0; + ec->tx_coalesce_usecs = 0; + ec->rx_coalesce_usecs_high = 0; + ec->tx_coalesce_usecs_high = 0; + + return 0; +} + +static int ne6x_get_eeprom_len(struct net_device *netdev) { return 256; } + +static int ne6x_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + u8 *eeprom_buff; + int err = 0; + int 
ret_val; + u32 magic; + + if (eeprom->len == 0) + return -EINVAL; + + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic && eeprom->magic != magic) { + /* make sure it is the right magic for NVMUpdate */ + if ((eeprom->magic >> 16) != hw->device_id) + err = -EINVAL; + else if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) + err = -EBUSY; + + return err; + } + + /* normal ethtool get_eeprom support */ + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = ne6x_dev_read_eeprom(adpt, 0x0, (u8 *)eeprom_buff, eeprom->len); + memcpy(bytes, eeprom_buff, eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) + +static u64 ne6x_get_rss_hash_opts(struct ne6x_adapter *adpt, u64 flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ + data |= RXH_IP_SRC | RXH_IP_DST; + break; + } + + return data; +} + +static int ne6x_set_rss_hash_opts(struct ne6x_adapter *adpt, struct ethtool_rxnfc *cmd) +{ + u16 rss_flags = adpt->rss_info.hash_type; + int status; + + if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_TCP; + break; + case UDP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP; + break; + case UDP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP; + break; + default: + return -EINVAL; + } + + if (rss_flags == adpt->rss_info.hash_type) + return 0; + + adpt->rss_info.hash_type = rss_flags; + + status = 
ne6x_dev_set_rss(adpt, &adpt->rss_info); + + return (status != 0) ? (-EIO) : 0; +} + +static int ne6x_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + switch (info->cmd) { + case ETHTOOL_GRXFH: + info->data = ne6x_get_rss_hash_opts(adpt, info->flow_type); + break; + case ETHTOOL_GRXRINGS: + info->data = adpt->num_queue; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ne6x_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int status = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adpt->num_queue; + break; + case ETHTOOL_SRXFH: + status = ne6x_set_rss_hash_opts(adpt, info); + break; + default: + return -EINVAL; + } + + return status; +} + +static u32 ne6x_get_rxfh_key_size(struct net_device *netdev) +{ + return NE6X_RSS_MAX_KEY_SIZE; +} + +static u32 ne6x_get_rss_table_size(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + + return rss_info->ind_table_size; +} + +static int ne6x_get_rxfh(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + unsigned int n = rss_info->ind_table_size; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (p) { + while (n--) + p[n] = rss_info->ind_table[n]; + } + + if (key) + memcpy(key, rss_info->hash_key, ne6x_get_rxfh_key_size(netdev)); + + return 0; +} + +static int ne6x_set_rxfh(struct net_device *netdev, const u32 *p, const u8 *key, const u8 hfunc) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + unsigned int i; + int status; + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + /* Fill out the redirection table */ + if (p) { + /* Allow at least 2 queues w/ SR-IOV. */ + for (i = 0; i < rss_info->ind_table_size; i++) + rss_info->ind_table[i] = p[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(&rss_info->hash_key[0], key, ne6x_get_rxfh_key_size(netdev)); + + status = ne6x_dev_set_rss(adpt, rss_info); + + return (status == 0) ? 
0 : (-EIO); +} + +static void ne6x_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = adpt->port_info->hw_max_queue; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = adpt->num_queue; +} + +static int ne6x_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + int qp_remaining, q_vectors, i; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int timeout = 50; + int err = 0; + + if (!channels->combined_count || channels->rx_count || channels->tx_count || + channels->combined_count > pf->hw.expect_vp) + return -EINVAL; + + if (channels->combined_count == adpt->num_queue) { + /* nothing to do */ + netdev_info(netdev, "channel not change, nothing to do!\n"); + return 0; + } + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + netdev_info(netdev, "ne6x config busy, timeout!!!\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* set for the next time the netdev is started */ + if (!netif_running(adpt->netdev)) { + adpt->port_info->queue = channels->combined_count; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = + DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = + adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = + ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + goto done; + } + + err = ne6x_close(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to close adpt = %d\n", adpt->idx); + goto done; + } + + adpt->port_info->queue = channels->combined_count; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + err = ne6x_open(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to open adpt = %d\n", adpt->idx); + goto done; + } + +done: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return err; +} + +static int ne6x_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + ne6x_dev_set_led(adpt, true); + return 1; + case ETHTOOL_ID_ON: + return 0; + case ETHTOOL_ID_OFF: + return 0; + case ETHTOOL_ID_INACTIVE: + ne6x_dev_set_led(adpt, false); + } + + return 0; +} + +static int ne6x_nway_reset(struct net_device *netdev) { return 0; } + +static u64 ne6x_link_test(struct net_device *netdev, u64 *data) +{ + struct ne6x_adapter *adpt = 
ne6x_netdev_to_adpt(netdev); + bool link_up = false; + int verify; + + verify = 0; + link_up = adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP; + usleep_range(10, 20); + + link_up &= verify; + if (link_up) + *data = 1; + else + *data = 0; + + return *data; +} + +static void ne6x_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + /* Online tests */ + if (ne6x_link_test(netdev, &data[NE6X_ETH_TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[NE6X_ETH_TEST_LOOPBACK] = 0; + if (ne6x_dev_test_loopback(adpt)) { + data[NE6X_ETH_TEST_LOOPBACK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + data[NE6X_ETH_TEST_REG] = 0; + if (ne6x_dev_test_reg(adpt)) { + data[NE6X_ETH_TEST_REG] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + data[NE6X_ETH_TEST_INT] = 0; + if (ne6x_dev_test_intr(adpt)) { + data[NE6X_ETH_TEST_INT] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static int ne6x_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct ne6x_link_status *hw_link_info; + enum ne6x_fec_state fec = NE6X_FEC_NONE; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int err = 0; + + hw_link_info = &adpt->port_info->phy.link_info; + if (hw_link_info->link_info & NE6X_AQ_LINK_UP) { + switch (hw_link_info->link_speed) { + case NE6X_LINK_SPEED_25GB: + case NE6X_LINK_SPEED_100GB: + err = ne6x_dev_get_fec(adpt, &fec); + if (fec == NE6X_FEC_RS) { + fecparam->fec |= ETHTOOL_FEC_RS; + fecparam->active_fec = ETHTOOL_FEC_RS; + } else if (fec == NE6X_FEC_BASER) { + fecparam->fec |= ETHTOOL_FEC_BASER; + fecparam->active_fec = ETHTOOL_FEC_BASER; + } else { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + } + break; + default: + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } + } else { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + } + + return err; +} + +static int ne6x_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + enum ne6x_fec_state fec = NE6X_FEC_NONE; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int err = 0; + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + dev_warn(&pf->pdev->dev, "Unsupported FEC mode: AUTO"); + err = -EINVAL; + goto done; + case ETHTOOL_FEC_RS: + fec = NE6X_FEC_RS; + break; + case ETHTOOL_FEC_BASER: + fec = NE6X_FEC_BASER; + break; + case ETHTOOL_FEC_OFF: + case ETHTOOL_FEC_NONE: + fec = NE6X_FEC_NONE; + break; + default: + dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", fecparam->fec); + err = -EINVAL; + goto done; + } + + err = ne6x_dev_set_fec(adpt, fec); + if (err) + return err; + +done: + return err; +} + +static const char * const flash_region_strings[] = { + "810 loader", + "810 app", + "807 app", + "NP Image", + "PXE Image", +}; + +static int ethtool_flash_firmware(struct net_device *netdev, u32 type, const u8 *data, + u32 size) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int ret; + + ret = ne6x_dev_upgrade_firmware(adpt, type, (u8 *)data, size, 1); + if (ret) + dev_err(&pf->pdev->dev, "Failed to flash firmware\n"); + + return ret; +} + +static int ethtool_flash_region(struct net_device *netdev, const u8 *data, u32 size, u32 region) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + int ret; + + netdev_info(netdev, "%s = 0x%x\n", __func__, region); + + switch 
(region) { + case NE6X_ETHTOOL_FLASH_810_APP: + case NE6X_ETHTOOL_FLASH_NP: + case NE6X_ETHTOOL_FLASH_PXE: + case NE6X_ETHTOOL_FLASH_810_LOADER: + case NE6X_ETHTOOL_FRU: + case NE6X_ETHTOOL_FLASH_807_APP: + ret = ethtool_flash_firmware(netdev, region, data, size); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + if (ret) + dev_info(&pf->pdev->dev, "loading %s fail, reload driver\n", + flash_region_strings[region]); + + return ret; +} + +static int ne6x_ethtool_get_flash_region(struct net_device *netdev, const u8 *data, u32 *size) +{ + int region = -1; + int ret; + + ret = ne6x_dev_validate_fw(data, *size, ®ion); + if (ret) { + netdev_err(netdev, "firmware error ret = %d\n", ret); + return -1; + } + + return region; +} + +static int ne6x_set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + const struct firmware *fw; + unsigned int master; + size_t fw_size; + u8 *fw_data; + int region; + int ret; + + master = (adpt->idx == 0); + if (!master) { + dev_info(&pf->pdev->dev, "only master port can upgrade\n"); + return -1; + } + + ret = request_firmware(&fw, ef->data, &pf->pdev->dev); + if (ret < 0) + return ret; + + fw_data = (u8 *)fw->data; + fw_size = fw->size; + if (fw_size > 0) { + region = ne6x_ethtool_get_flash_region(netdev, fw_data, (u32 *)&fw_size); + if (region < 0) { + ret = region; + goto out_free_fw; + } + + ret = ethtool_flash_region(netdev, fw_data, fw_size, region); + if (ret) + goto out_free_fw; + } + +out_free_fw: + release_firmware(fw); + return ret; +} + +#define NE6X_FIRMWARE_RESET_CHIP \ + ((ETH_RESET_MGMT | ETH_RESET_IRQ | \ + ETH_RESET_DMA | ETH_RESET_FILTER | \ + ETH_RESET_OFFLOAD | ETH_RESET_MAC | \ + ETH_RESET_PHY | ETH_RESET_RAM) << ETH_RESET_SHARED_SHIFT) + +static int ne6x_reset(struct net_device *netdev, u32 *flags) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + bool reload = false; + u32 req = *flags; + + if (!req) + return -EINVAL; + + if (adpt->idx != 0x0) { + netdev_err(netdev, "Reset is not supported from a eth0_nfp1\n"); + return -EOPNOTSUPP; + } + + if ((req & NE6X_FIRMWARE_RESET_CHIP) == NE6X_FIRMWARE_RESET_CHIP) { + /* This feature is not supported in older firmware versions */ + if (!ne6x_dev_reset_firmware(adpt)) { + netdev_info(netdev, "Firmware reset request successful.\n"); + reload = true; + *flags &= ~NE6X_FIRMWARE_RESET_CHIP; + } + } + + if (reload) + netdev_info(netdev, "Reload driver to complete reset\n"); + + return 0; +} + +static int ne6x_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_sfp_mod_type_len sfp_mod; + int err; + + err = ne6x_dev_get_sfp_type_len(adpt, &sfp_mod); + if (err) + return err; + + modinfo->type = sfp_mod.type; + modinfo->eeprom_len = sfp_mod.len; + netdev_info(netdev, "type %d erprom_len %d.\n", sfp_mod.type, sfp_mod.len); + + return 0; +} + +#define STD_SFP_INFO_MAX_SIZE 640 + +static int ne6x_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + int err; + + if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, ee->len); + err = ne6x_dev_get_sfp_eeprom(adpt, sfp_data, ee->offset, ee->len, 0); + if (err) + return err; + + memcpy(data, sfp_data + ee->offset, ee->len); + + return 0; +} + +static u32 
ne6x_get_priv_flags(struct net_device *netdev) +{ + const struct ne6x_priv_flag *priv_flag; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 is_write_proterct = false; + u32 i, ret_flags = 0; + u32 value = 0; + + ne6x_dev_get_norflash_write_protect(adpt->back, &is_write_proterct); + if (is_write_proterct) + set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + else + clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + + if (ne6x_dev_get_trust_vlan(adpt->back)) + set_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags); + else + clear_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags); + value = ne6x_dev_get_features(adpt); + if (value & NE6X_F_RX_FW_LLDP) + clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + else + set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + priv_flag = &ne6x_gstrings_priv_flags[i]; + if (test_bit(priv_flag->bitno, adpt->flags)) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +static int ne6x_set_priv_flags(struct net_device *netdev, u32 flags) +{ + DECLARE_BITMAP(change_flags, NE6X_ADPT_F_NBITS); + DECLARE_BITMAP(orig_flags, NE6X_ADPT_F_NBITS); + const struct ne6x_priv_flag *priv_flag; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int ret = 0; + u32 i; + + if (flags > BIT(NE6X_PRIV_FLAG_ARRAY_SIZE)) + return -EINVAL; + + bitmap_copy(orig_flags, adpt->flags, NE6X_ADPT_F_NBITS); + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + priv_flag = &ne6x_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + set_bit(priv_flag->bitno, adpt->flags); + else + clear_bit(priv_flag->bitno, adpt->flags); + } + + bitmap_xor(change_flags, adpt->flags, orig_flags, NE6X_ADPT_F_NBITS); + + if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, change_flags)) { + if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags)) + ne6x_dev_set_fw_lldp(adpt, false); + else + ne6x_dev_set_fw_lldp(adpt, true); + } + + if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, change_flags)) { + if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags)) + ne6x_dev_set_norflash_write_protect(adpt->back, true); + else + ne6x_dev_set_norflash_write_protect(adpt->back, false); + } + + if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, change_flags)) { + if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags)) + ne6x_dev_set_ddos(adpt->back, true); + else + ne6x_dev_set_ddos(adpt->back, false); + } + + if (test_bit(NE6X_ADPT_F_ACL, change_flags)) { + if (adpt->idx != 0) { + netdev_err(netdev, "only adpt 0 support acl flag\n"); + return -EINVAL; + } + if (test_bit(NE6X_ADPT_F_ACL, adpt->flags)) { + if (ne6x_dev_set_white_list(adpt->back, true)) + return -EPERM; + } else { + ne6x_dev_set_white_list(adpt->back, false); + } + } + if (test_bit(NE6X_ADPT_F_TRUST_VLAN, change_flags)) { + if (test_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags)) + ne6x_dev_set_trust_vlan(adpt->back, true); + else + ne6x_dev_set_trust_vlan(adpt->back, false); + } + return ret; +} + +static int ne6x_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(dev); + + dump->version = 1; + dump->flag = 0; + + /* Calculate the requested preset idx length */ + if (ne6x_dev_get_dump_data_len(pf, &dump->len)) { + dump->len = 0; + return -EAGAIN; + } + + return 0; +} + +static int ne6x_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, void *buffer) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(dev); + u32 *p = buffer; + + if (ne6x_dev_get_dump_data(pf, p, dump->len)) + return -EAGAIN; + + return 0; +} + +static const struct 
ethtool_ops ne6x_ethtool_ops = { + .get_link_ksettings = ne6x_get_link_ksettings, + .set_link_ksettings = ne6x_set_link_ksettings, + .get_strings = ne6x_get_strings, + .get_sset_count = ne6x_get_sset_count, + .get_ethtool_stats = ne6x_get_ethtool_stats, + .get_drvinfo = ne6x_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_regs = ne6x_get_regs, + .get_regs_len = ne6x_get_regs_len, + .get_dump_flag = ne6x_get_dump_flag, + .get_dump_data = ne6x_get_dump_data, + .self_test = ne6x_self_test, + .get_ringparam = ne6x_get_ringparam, + .set_ringparam = ne6x_set_ringparam, + .get_pauseparam = ne6x_get_pauseparam, + .set_pauseparam = ne6x_set_pauseparam, + .get_coalesce = ne6x_get_coalesce, + .get_eeprom_len = ne6x_get_eeprom_len, + .get_eeprom = ne6x_get_eeprom, + .get_rxnfc = ne6x_get_rxnfc, + .set_rxnfc = ne6x_set_rxnfc, + .get_rxfh_key_size = ne6x_get_rxfh_key_size, + .get_rxfh_indir_size = ne6x_get_rss_table_size, + .get_rxfh = ne6x_get_rxfh, + .set_rxfh = ne6x_set_rxfh, + .get_channels = ne6x_get_channels, + .set_channels = ne6x_set_channels, + .flash_device = ne6x_set_flash, + .reset = ne6x_reset, + .get_module_info = ne6x_get_module_info, + .get_module_eeprom = ne6x_get_module_eeprom, + .get_priv_flags = ne6x_get_priv_flags, + .set_priv_flags = ne6x_set_priv_flags, + .set_phys_id = ne6x_set_phys_id, + .nway_reset = ne6x_nway_reset, + .self_test = ne6x_diag_test, + .get_fecparam = ne6x_get_fec_param, + .set_fecparam = ne6x_set_fec_param, +}; + +void ne6x_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &ne6x_ethtool_ops; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h new file mode 100644 index 000000000000..54d84d65900f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_ETHTOOL_H +#define _NE6X_ETHTOOL_H + +#define NE6X_STAT(_type, _name, _stat) \ +{ \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +enum ne6x_ethtool_test_id { + NE6X_ETH_TEST_LINK, + NE6X_ETH_TEST_LOOPBACK, + NE6X_ETH_TEST_REG, + NE6X_ETH_TEST_INT, + NE6X_ETH_TEST_CHIP_TEMPERATUR, + NE6X_ETH_TEST_BOARD_TEMPERATUR, + NE6X_ETH_TEST_CURRENT, + NE6X_ETH_TEST_VOLTAGE, + NE6X_ETH_TEST_POWER, + NE6X_ETH_TEST_I2C3, +}; + +void ne6x_set_ethtool_ops(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c new file mode 100644 index 000000000000..060ea5b8eca2 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_interrupt.h" + +static int ne6x_init_msix(struct ne6x_pf *pf, int budget) +{ + int actual_vector; + ssize_t size; + + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, budget); + dev_info(&pf->pdev->dev, "%s actual_vector = %d\n", __func__, actual_vector); + if (actual_vector <= 0) { + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + dev_err(&pf->pdev->dev, "error msix enable failed\n"); + return -ENODEV; + } + + size = struct_size(pf->irq_pile, list, actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + return 0; +} + +static int ne6x_init_intx(struct ne6x_pf *pf) +{ + int actual_vector; + ssize_t size; + + dev_info(&pf->pdev->dev, "try enable intx\n"); + actual_vector = 0x1; + + size = struct_size(pf->irq_pile, list, actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + test_and_set_bit(NE6X_PF_INTX, pf->state); + + return 0; +} + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf) +{ + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + u64 __iomem *reg; + int err; + int i; + + pf->msix_entries = kcalloc(NE6X_MAX_MSIX_NUM, sizeof(struct msix_entry), GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < NE6X_MAX_MSIX_NUM; i++) + pf->msix_entries[i].entry = i; + + test_and_set_bit(NE6X_PF_MSIX, pf->state); + + if (ne6x_init_msix(pf, NE6X_MAX_MSIX_NUM)) { + clear_bit(NE6X_PF_MSIX, pf->state); + err = ne6x_init_intx(pf); + if (err) { + dev_err(&pf->pdev->dev, "error intx enable failed\n"); + return err; + } + } + + if (pf->irq_pile->num_entries >= NE6X_MAX_MSIX_NUM) { + err = ne6x_init_link_irq(pf); + if (err) { + dev_err(&pf->pdev->dev, "init int irq failed\n"); + return err; + } + } + + /* We only initialize int once, so as not to overwrite user settings */ + if (test_and_set_bit(NE6X_INT_INIT_DOWN, pf->state)) + return 0; + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + writeq(ciu_time_out_cdg.val, reg); + + return 0; +} + 
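+/**
+ * ne6x_adpt_alloc_q_vector - allocate and set up one interrupt queue vector
+ * @adpt: adapter the vector belongs to
+ * @v_idx: index into the adapter's q_vectors array
+ *
+ * Allocates a ne6x_q_vector, ties it to @adpt at slot @v_idx, defaults its
+ * affinity mask to all possible CPUs and, when a netdev is attached,
+ * registers the NAPI poll handler for it.
+ *
+ * Returns 0 on success or -ENOMEM if the allocation fails.
+ */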
+static int ne6x_adpt_alloc_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->adpt = adpt; + q_vector->v_idx = v_idx; + + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + + if (adpt->netdev) + netif_napi_add(adpt->netdev, &q_vector->napi, ne6x_napi_poll); + + /* tie q_vector and adpt together */ + adpt->q_vectors[v_idx] = q_vector; + return 0; +} + +static void ne6x_free_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *ring; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + if (!q_vector) { + dev_dbg(dev, "Queue vector at index %d not found\n", v_idx); + return; + } + + /* disassociate q_vector from rings */ + ne6x_for_each_ring(ring, q_vector->tx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->rx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->cq) ring->q_vector = NULL; + + /* only adapter w/ an associated netdev is set up w/ NAPI */ + if (adpt->netdev) + netif_napi_del(&q_vector->napi); + + adpt->q_vectors[v_idx] = NULL; + kfree(q_vector); +} + +static int ne6x_adpt_alloc_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx, num_q_vectors, err; + + /* if not MSIX, give the one vector only to the LAN adapter */ + num_q_vectors = adpt->num_q_vectors; + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ne6x_adpt_alloc_q_vector(adpt, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ne6x_free_q_vector(adpt, v_idx); + + return err; +} + +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx; + + for (v_idx = 0; v_idx < adpt->num_q_vectors; v_idx++) + ne6x_free_q_vector(adpt, v_idx); +} + +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + int ret = -ENOENT; + + if (adpt->q_vectors[0]) { + dev_info(&pf->pdev->dev, "adapter %d has existing q_vectors\n", adpt->idx); + return -EEXIST; + } + + if (adpt->base_vector) { + dev_info(&pf->pdev->dev, "adapter %d has non-zero base vector %d\n", adpt->idx, + adpt->base_vector); + return -EEXIST; + } + + ret = ne6x_adpt_alloc_q_vectors(adpt); + if (ret) { + dev_info(&pf->pdev->dev, "failed to allocate %d q_vector for adapter %d, ret=%d\n", + adpt->num_q_vectors, adpt->idx, ret); + adpt->num_q_vectors = 0; + goto vector_setup_out; + } + + if (adpt->num_q_vectors) + adpt->base_vector = adpt->port_info->hw_queue_base; + + if (adpt->base_vector < 0) { + dev_info(&pf->pdev->dev, "failed to get tracking for %d vectors for adapter %d, err=%d\n", + adpt->num_q_vectors, adpt->idx, adpt->base_vector); + ne6x_adpt_free_q_vectors(adpt); + ret = -ENOENT; + goto vector_setup_out; + } + +vector_setup_out: + return ret; +} + +static void ne6x_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector = + container_of(notify, struct ne6x_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +static void ne6x_irq_affinity_release(struct kref *ref) {} + +int ne6x_adpt_request_irq_msix(struct ne6x_adapter *adpt, char *basename) +{ + int q_vectors = adpt->num_q_vectors; + struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int rx_int_idx = 0; + int tx_int_idx = 0; + int vector, err; + int irq_num; + int cpu; + + for (vector = 0; vector < q_vectors; vector++) { + struct 
ne6x_q_vector *q_vector = adpt->q_vectors[vector]; + + irq_num = pf->msix_entries[base + vector].vector; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + + err = request_irq(irq_num, adpt->irq_handler, 0, q_vector->name, q_vector); + if (err) { + dev_info(&pf->pdev->dev, "MSIX request_irq failed, error: %d\n", err); + goto free_queue_irqs; + } + + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = ne6x_irq_affinity_notify; + q_vector->affinity_notify.release = ne6x_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + + /* Spread affinity hints out across online CPUs. + * + * get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to pass to + * irq_set_affinity_hint without making a copy. + */ + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + } + + adpt->irqs_ready = true; + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_num = pf->msix_entries[base + vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adpt->q_vectors[vector]); + } + + return err; +} + +static irqreturn_t ne6x_intr(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6x_adapter *adpt = q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + u64 reg_val; + + reg_val = rd64(hw, NE6X_VPINT_DYN_CTLN(0, NE6X_VP_INT)); + if (!(reg_val & 0x10000)) + return IRQ_NONE; + + napi_schedule(&q_vector->napi); + return IRQ_HANDLED; +} + +int ne6x_adpt_request_irq_intx(struct ne6x_adapter *adpt, char *basename) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[0]; + struct net_device *netdev = adpt->netdev; + struct ne6x_pf *pf = adpt->back; + u32 irq = pf->pdev->irq; + int err; + + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-INTx", basename, "TxRx"); + + err = request_irq(irq, &ne6x_intr, IRQF_SHARED, netdev->name, q_vector); + if (err) { + dev_info(&pf->pdev->dev, "INTx request_irq failed, error: %d\n", err); + return err; + } + + return 0; +} + +int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename) +{ + struct ne6x_pf *pf = adpt->back; + int err; + + if (test_bit(NE6X_PF_MSIX, pf->state)) + err = ne6x_adpt_request_irq_msix(adpt, basename); + else + err = ne6x_adpt_request_irq_intx(adpt, basename); + + if (err) + dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); + + return err; +} + +void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt) +{ + union ne6x_vp_int_mask int_mask; + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + union ne6x_int_cfg int_cfg; + u32 qp, nextqp; + int i, q; + + /* The interrupt indexing is offset by 1 in the PFINT_ITRn + * and PFINT_LNKLSTn registers, e.g.: + * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) + */ + qp = adpt->base_queue; + + /* SRIOV mode VF Config OR SRIOV disabled PF Config */ + if (qp < NE6X_PF_VP0_NUM) { + for (i = 0; i < adpt->num_q_vectors; i++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[i]; + + 
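+			/* Program per-queue-pair interrupt thresholds and leave
+			 * every CIU interrupt source masked for now; the CQ
+			 * interrupt is unmasked later by ne6x_irq_dynamic_enable()
+			 * once the adapter's IRQs are enabled.
+			 */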
for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp + i + q; + + int_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG), int_cfg.val); + + int_mask.val = rd64(hw, + NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } else { + /* SRIOV mode PF Config */ + for (i = 0; i < adpt->num_q_vectors; i++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[i]; + + for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp - NE6X_PF_VP0_NUM + i + q; + + int_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG), + int_cfg.val); + + int_mask.val = + rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } +} + +static inline void ne6x_irq_dynamic_enable(struct ne6x_adapter *adpt, int vector) +{ + union ne6x_vp_int_mask int_mask; + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + + if (vector < NE6X_PF_VP0_NUM) { + int_mask.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK), int_mask.val); + } else { + int_mask.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + int_mask.val); + } +} + +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt) +{ + int i; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_irq_dynamic_enable(adpt, adpt->base_vector + i); + + return 0; +} + +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + int i; + + /* disable each interrupt */ + if (base < NE6X_PF_VP0_NUM) { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT), NE6X_MAX_U64); + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT_MASK), NE6X_MAX_U64); + } + } else { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT), + NE6X_MAX_U64); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + NE6X_MAX_U64); + } + } + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + for (i = 0; i < adpt->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } else { + synchronize_irq(pf->pdev->irq); + } +} + +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int i; + + if (!adpt->q_vectors) + return; + + if (!adpt->irqs_ready) + return; + + adpt->irqs_ready = false; + for (i = 0; i < adpt->num_q_vectors; i++) { + int irq_num; + u16 vector; + + vector = i + base; + irq_num = 
pf->msix_entries[vector].vector; + + /* free only the irqs that were actually requested */ + if (!adpt->q_vectors[i] || !adpt->q_vectors[i]->num_ringpairs) + continue; + + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); + + /* remove our suggested affinity mask for this IRQ */ + irq_set_affinity_hint(irq_num, NULL); + + synchronize_irq(irq_num); + free_irq(irq_num, adpt->q_vectors[i]); + } +} + +static void ne6x_reset_interrupt_capability(struct ne6x_pf *pf) +{ + /* If we're in Legacy mode, the interrupt was cleaned in adpt_close */ + if (pf->msix_entries) { + pci_disable_msix(pf->pdev); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + } + + kfree(pf->irq_pile); + pf->irq_pile = NULL; +} + +int ne6x_init_link_irq(struct ne6x_pf *pf) +{ + int irq_num; + int err; + + snprintf(pf->link_intname, sizeof(pf->link_intname) - 1, "%s-%s-%d", + dev_driver_string(&pf->pdev->dev), "link", pf->hw.bus.bus_num); + irq_num = pf->msix_entries[NE6X_NIC_INT_VP].vector; + err = request_irq(irq_num, ne6x_linkint_irq_handler, 0, pf->link_intname, pf); + if (!err) + pf->link_int_irq_ready = true; + + return 0; +} + +int ne6x_enable_link_irq(struct ne6x_pf *pf) +{ + u64 int_mask = 0xffffffffffffffff; + u64 temp = 1; + int i = 0; + + if (!pf->link_int_irq_ready) + return 0; + + for (i = 0; i < pf->hw.pf_port; i++) + int_mask &= ~(temp << (i + NE6X_NIC_INT_START_BIT)); + + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + int_mask); + + return 0; +} + +int ne6x_disable_link_irq(struct ne6x_pf *pf) +{ + u64 int_mask = 0xffffffffffffffff; + u64 int_val; + + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + int_mask); + int_val = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT)); + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT), + int_val); + + return 0; +} + +void ne6x_free_link_irq(struct ne6x_pf *pf) +{ + if (pf->link_int_irq_ready) { + synchronize_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector); + free_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector, pf); + } + + pf->link_int_irq_ready = false; +} + +irqreturn_t ne6x_msix_clean_vf_mbx(int irq, void *data) +{ + struct ne6x_pf *pf = data; + struct ne6x_hw *hw = &pf->hw; + bool have_cmd = false; + struct ne6x_vf *vf; + u64 int_val = 0; + u64 val; + int i; + + val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT)); + ne6x_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (val & (1ULL << vf->base_queue)) { + test_and_set_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT; + pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = true; + have_cmd = true; + int_val |= (1ULL << vf->base_queue); + } + } + + if (have_cmd) { + ne6x_service_event_schedule(pf); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), int_val); + } + + val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT)); + ne6x_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (val & (1ULL << vf->base_queue)) { + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), + (1ULL << vf->base_queue)); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + pf->hw.ne6x_mbx_ready_to_send[i] = true; + } + } + + return IRQ_HANDLED; +} + +int ne6x_init_mailbox_irq(struct ne6x_pf *pf) +{ + int irq_num; + int err; + + snprintf(pf->mailbox_intname, sizeof(pf->mailbox_intname) - 1, "%s-%s-%d", + dev_driver_string(&pf->pdev->dev), "mailbox", 
pf->hw.bus.bus_num); + irq_num = pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector; + err = request_irq(irq_num, ne6x_msix_clean_vf_mbx, 0, pf->mailbox_intname, pf); + if (!err) + pf->mailbox_int_irq_ready = true; + + dev_info(&pf->pdev->dev, "reg mailbox irq id= %d,name = %s\n", irq_num, + pf->mailbox_intname); + + return err; +} + +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +void ne6x_free_mailbox_irq(struct ne6x_pf *pf) +{ + if (pf->mailbox_int_irq_ready) { + synchronize_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector); + free_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector, pf); + } + + pf->mailbox_int_irq_ready = false; +} + +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) + ne6x_adpt_free_q_vectors(pf->adpt[i]); + } + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + ne6x_reset_interrupt_capability(pf); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h new file mode 100644 index 000000000000..e8d512d965a1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_INTERRUPT_H +#define _NE6X_INTERRUPT_H + +#include "ne6x.h" + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf); +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt); +int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename); +void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt); +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt); +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt); +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf); +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt); +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data); +int ne6x_enable_link_irq(struct ne6x_pf *pf); +int ne6x_disable_link_irq(struct ne6x_pf *pf); +int ne6x_init_link_irq(struct ne6x_pf *pf); +void ne6x_free_link_irq(struct ne6x_pf *pf); +int ne6x_init_mailbox_irq(struct ne6x_pf *pf); +void ne6x_free_mailbox_irq(struct ne6x_pf *pf); +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c new file mode 100644 index 000000000000..0344c9957ddb --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c @@ -0,0 +1,3112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_debugfs.h" +#include "ne6x_arfs.h" +#include "version.h" +#include "ne6x_netlink.h" +#include "ne6x_interrupt.h" + +#define CREATE_TRACE_POINTS +#include "ne6x_trace.h" + +#define SUMMARY "Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver" +#define COPYRIGHT "Copyright(c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd." + +char ne6x_driver_name[] = "ncepf"; + +static const char ne6x_driver_string[] = SUMMARY; + +const char ne6x_driver_version_str[] = VERSION; +static const char ne6x_copyright[] = COPYRIGHT; + +/* ne6x_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ne6x_pci_tbl[] = { + {PCI_VDEVICE(BZWX, 0x5010), 0}, + {PCI_VDEVICE(BZWX, 0x5011), 0}, + {PCI_VDEVICE(BZWX, 0x6010), 0}, + {PCI_VDEVICE(BZWX, 0x6011), 0}, + /* required last entry */ + {0, 0}, +}; + +MODULE_DEVICE_TABLE(pci, ne6x_pci_tbl); +MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., "); +MODULE_DESCRIPTION("Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); + +static struct workqueue_struct *ne6x_wq; +static const struct net_device_ops ne6x_netdev_ops; + +bool netif_is_ne6x(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &ne6x_netdev_ops); +} + +int ne6x_hw_init(struct ne6x_hw *hw) +{ + int cpu_num = num_online_cpus(); + + /* max phy_port */ + hw->pf_port = ne6x_dev_get_port_num(hw->back); + /* expect vp queue */ + hw->expect_vp = NE6X_MAX_VP_NUM / hw->pf_port; + /* actal max vp queue */ + hw->max_queue = min_t(int, cpu_num, hw->expect_vp); + + hw->port_info = devm_kzalloc(ne6x_hw_to_dev(hw), sizeof(*hw->port_info), GFP_KERNEL); + if (!hw->port_info) + return -EIO; + + /* set the back pointer to HW */ + hw->port_info->hw = hw; + + if (!is_valid_ether_addr(hw->port_info->mac.perm_addr)) + eth_random_addr(hw->port_info->mac.perm_addr); + + return 0; +} + +int ne6x_aq_get_phy_capabilities(struct ne6x_adapter *adpt, bool is_up, bool get_hw_stats) +{ + struct ne6x_port_info *port_info = adpt->port_info; + + /* read link states */ + if (get_hw_stats) + ne6x_dev_get_link_status(adpt, &port_info->link_status); + + if (is_up) { + if (port_info->link_status.link) { + port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP; + + switch (port_info->link_status.speed) { + case NE6X_LINK_SPEED_10GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_info(&adpt->back->pdev->dev, "WARNING: 
Unrecognized link_speed (0x%x).\n", + NE6X_LINK_SPEED_UNKNOWN); + break; + } + + port_info->phy.media_type = NE6X_MEDIA_FIBER; + return 0; + } + } + + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN; + port_info->phy.media_type = NE6X_MEDIA_UNKNOWN; + port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP; + + return 0; +} + +int ne6x_aq_get_vf_link_status(struct ne6x_adapter *adpt, bool is_up) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_adapter *pf_adpt = pf->adpt[(adpt->port_info->lport >= pf->hw.pf_port) ? + (pf->hw.pf_port - 1) : adpt->port_info->lport]; + struct ne6x_link_info *pf_link_status = &pf_adpt->port_info->link_status; + struct ne6x_port_info *vf_port_info = adpt->port_info; + + if (is_up) { + if (pf_link_status->link) { + vf_port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP; + + switch (pf_link_status->speed) { + case NE6X_LINK_SPEED_10GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_info(&adpt->back->pdev->dev, "WARNING: Unrecognized link_speed (0x%x).\n", + NE6X_LINK_SPEED_UNKNOWN); + break; + } + + vf_port_info->phy.media_type = NE6X_MEDIA_FIBER; + return 0; + } + } + + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN; + vf_port_info->phy.media_type = NE6X_MEDIA_UNKNOWN; + vf_port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP; + + return 0; +} + +static void ne6x_adpt_link_event(struct ne6x_adapter *adpt, bool link_up) +{ + if (!adpt) + return; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state) || !adpt->netdev) + return; + + if (link_up == netif_carrier_ok(adpt->netdev)) + return; + + if (link_up) { + netif_carrier_on(adpt->netdev); + netif_tx_wake_all_queues(adpt->netdev); + } else { + netif_carrier_off(adpt->netdev); + netif_tx_stop_all_queues(adpt->netdev); + } +} + +void ne6x_print_link_message(struct ne6x_adapter *adpt, bool isup) +{ + char *speed = "Unknown "; + char *an = "False"; + u16 new_speed; + + if (isup) + new_speed = adpt->port_info->phy.link_info.link_speed; + else + new_speed = NE6X_LINK_SPEED_UNKNOWN; + + if (adpt->current_isup == isup && adpt->current_speed == new_speed) + return; + + adpt->current_isup = isup; + adpt->current_speed = new_speed; + + if (!isup) { + netdev_info(adpt->netdev, "NIC Link is Down\n"); + return; + } + + switch (adpt->port_info->phy.link_info.link_speed) { + case NE6X_LINK_SPEED_40GB: + speed = "40 G"; + break; + case NE6X_LINK_SPEED_100GB: + speed = "100 G"; + break; + case NE6X_LINK_SPEED_10GB: + speed = "10 G"; + break; + case NE6X_LINK_SPEED_25GB: + speed = "25 G"; + break; + case NE6X_LINK_SPEED_200GB: + speed = "200 G"; + break; + default: + break; + } + + 
if (adpt->port_info->phy.link_info.an_info) + an = "True"; + + netdev_info(adpt->netdev, "NIC Link is Up, %sbps Full Duplex, Autoneg: %s\n", speed, an); +} + +static void ne6x_link_event(struct ne6x_pf *pf) +{ + struct ne6x_phy_info *phy_info; + struct ne6x_adapter *adpt = NULL; + u32 old_link_speed; + bool old_link; + bool link_up; + int i; +#ifdef CONFIG_PCI_IOV + struct ne6x_vf *vf; + int vf_id; +#endif + + for (i = 0; i < pf->num_alloc_adpt; i++) { + link_up = false; + adpt = pf->adpt[i]; + phy_info = &adpt->port_info->phy; + phy_info->link_info_old = phy_info->link_info; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_aq_get_phy_capabilities(adpt, false, true); + else + ne6x_aq_get_phy_capabilities(adpt, true, true); + + /* add sfp online state begin */ + ne6x_dev_get_sfp_status(adpt, &phy_info->link_info.ext_info); + if (phy_info->link_info.ext_info != phy_info->link_info_old.ext_info) { + if (phy_info->link_info.ext_info == 0) + netdev_info(adpt->netdev, "adpt->id= %d,optical module unplugged", + adpt->idx); + else + netdev_info(adpt->netdev, "adpt->id= %d,optical module plugged", + adpt->idx); + } + + /* end sfp online state */ + old_link = !!(adpt->port_info->phy.link_info_old.link_info & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now. + */ + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + /* if the old link up/down is the same as the new */ + if (link_up == old_link) { + if (link_up && old_link_speed != adpt->port_info->phy.link_info.link_speed) + ne6x_print_link_message(adpt, link_up); + + continue; + } + + ne6x_adpt_link_event(adpt, link_up); + ne6x_print_link_message(adpt, link_up); + } + +#ifdef CONFIG_PCI_IOV + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + if (test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + if (!vf->rx_tx_state) { + adpt->port_info->phy.link_info.link_info = 0x0; + vf->rx_tx_state = true; + } + link_up = false; + phy_info = &adpt->port_info->phy; + phy_info->link_info_old = phy_info->link_info; + ne6x_aq_get_vf_link_status(adpt, true); + old_link = !!(adpt->port_info->phy.link_info_old.link_info + & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + if (link_up == old_link && + old_link_speed == adpt->port_info->phy.link_info.link_speed) + continue; + + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT; + ne6x_vc_notify_link_state(vf); + } + } +#endif +} + +static void ne6x_clean_link_status_subtask(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_LINK_POOLING, pf->state)) + return; + + ne6x_link_event(pf); +} + +void ne6x_service_event_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->serv_task); +} + +static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt); + +static void ne6x_do_reset(struct ne6x_pf *pf, u32 reset_flags, bool lock_acquired) +{ + struct ne6x_adapter *adpt = NULL; + int i; + + WARN_ON(in_interrupt()); + + if (reset_flags & BIT_ULL(NE6X_PF_RESET_REQUESTED)) { + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_RECOVER, adpt->comm.state)) { + ne6x_adpt_reinit_locked(adpt); + clear_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + } + } + } else if (reset_flags & 
BIT_ULL(NE6X_CORE_RESET_REQUESTED)) { + /* hardware reset:include PCIE,CORE.etc. */ + dev_info(&pf->pdev->dev, "timeout info: CORE reset\n"); + } else { + dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); + } +} + +static void ne6x_recover_hang_subtask(struct ne6x_pf *pf) +{ + u32 reset_flags = 0; + + if (test_and_clear_bit(NE6X_PF_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_PF_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_CORE_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_CORE_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_GLOBAL_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_GLOBAL_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_DOWN_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_DOWN_REQUESTED); + + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. + */ + if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) { + clear_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + test_and_clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + } + + /* If we're already down or resetting, just bail */ + if (reset_flags && !test_bit(NE6X_DOWN, pf->state) && + !test_bit(NE6X_CONFIG_BUSY, pf->state)) + ne6x_do_reset(pf, reset_flags, false); +} + +static void ne6x_service_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, serv_tmr); + + if (pf->num_alloc_vfs) + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->service_timer_period)); + + ne6x_service_event_schedule(pf); +} + +void ne6x_linkscan_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_linkscan_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, linkscan_tmr); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ)); + else + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ * 30)); + + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_service_task(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, serv_task); + unsigned long start_time = jiffies; + +#ifdef CONFIG_PCI_IOV + /* vf command process */ + ne6x_vc_process_vf_msg(pf); +#endif + + ne6x_recover_hang_subtask(pf); + + ne6x_sync_arfs_fltrs(pf); + + /* If the tasks have taken longer than one timer cycle or there + * is more work to be done, reschedule the service task now + * rather than wait for the timer to tick again. 
+ */ + if (time_after(jiffies, (start_time + pf->service_timer_period)) || + test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state) || + test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) + ne6x_service_event_schedule(pf); +} + +static void ne6x_linkscan_work(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, linkscan_work); + + ne6x_clean_link_status_subtask(pf); +} + +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data) +{ + struct ne6x_pf *pf = data; + u64 intval = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + + wr64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT), + intval); + ne6x_linkscan_schedule(pf); + + return IRQ_HANDLED; +} + +int ne6x_pf_init(struct ne6x_pf *pf) +{ + pf->ctrl_adpt_idx = 0; + mutex_init(&pf->switch_mutex); + + /* set up periodic task facility */ + timer_setup(&pf->serv_tmr, ne6x_service_timer, 0); + pf->service_timer_period = HZ; + timer_setup(&pf->linkscan_tmr, ne6x_linkscan_timer, 0); + add_timer(&pf->serv_tmr); + + INIT_WORK(&pf->serv_task, ne6x_service_task); + INIT_WORK(&pf->linkscan_work, ne6x_linkscan_work); + + clear_bit(NE6X_SERVICE_SCHED, pf->state); + + pf->next_adpt = 0; + pf->num_alloc_adpt = pf->hw.pf_port; + pf->num_alloc_vfs = 0; + pf->mailbox_int_irq_ready = false; + pf->link_int_irq_ready = false; + + ne6x_dbg_pf_init(pf); + ne6x_proc_pf_init(pf); + + /* init key list head node */ + spin_lock_init(&pf->key_list_lock); + INIT_LIST_HEAD(&pf->key_filter_list); + + return 0; +} + +static void ne6x_set_num_rings_in_adpt(struct ne6x_adapter *adpt) +{ + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + adpt->num_tx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_rx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_cq_desc = adpt->num_tx_desc + adpt->num_rx_desc; + adpt->num_tg_desc = adpt->num_tx_desc; + adpt->irqs_ready = false; +} + +static irqreturn_t ne6x_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring && !q_vector->tg.ring) + return IRQ_HANDLED; + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), + 0xffffffffffffffff); + else + wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + 0xffffffffffffffff); + + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt) +{ + struct ne6x_ring **next_rings; + int ret = -ENODEV; + int size; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + adpt->netdev_registered = false; + size = sizeof(struct ne6x_ring *) * adpt->num_queue * 4; + adpt->tx_rings = kzalloc(size, GFP_KERNEL); + if (!adpt->tx_rings) + goto err_rings; + + next_rings = adpt->tx_rings + adpt->num_queue; + adpt->cq_rings = next_rings; + next_rings += adpt->num_queue; + adpt->rx_rings = next_rings; + adpt->tg_rings = adpt->rx_rings + adpt->num_queue; + + /* allocate memory for q_vector pointers */ + size = sizeof(struct ne6x_q_vector *) * adpt->num_q_vectors; + adpt->q_vectors = kzalloc(size, GFP_KERNEL); + if 
(!adpt->q_vectors) { + kfree(adpt->tx_rings); + ret = -ENOMEM; + goto err_rings; + } + + /* Setup default MSIX irq handler for adapter */ + ne6x_adpt_setup_irqhandler(adpt, ne6x_msix_clean_rings); + ret = 0; + +err_rings: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +static int ne6x_force_link_state(struct ne6x_adapter *adpt, bool is_up) +{ + int err; + + err = ne6x_aq_get_phy_capabilities(adpt, is_up, true); + if (err) + return err; + + if (is_up) + test_and_set_bit(NE6X_LINK_POOLING, adpt->back->state); + + return 0; +} + +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int i, pf_q; + + pf_q = adpt->base_queue; + for (i = 0; i < adpt->num_queue; i++, pf_q++) { + if (pf_q < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_VP_RELOAD), enable); + else + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_VP_RELOAD), + enable); + + usleep_range(1000, 2000); + if (!enable) { + ne6x_tail_update(adpt->rx_rings[i], 0); + ne6x_tail_update(adpt->tx_rings[i], 0); + } + } + + return 0; +} + +int ne6x_adpt_configure(struct ne6x_adapter *adpt) +{ + int err; + int i; + + err = ne6x_adpt_restart_vp(adpt, true); + if (!err) + err = ne6x_adpt_configure_tx(adpt); + + if (!err) + err = ne6x_adpt_configure_cq(adpt); + + if (!err) + err = ne6x_adpt_configure_rx(adpt); + + if (!err) + err = ne6x_adpt_restart_vp(adpt, false); + + if (!err) { + for (i = 0; i < adpt->num_queue && !err; i++) + ne6x_alloc_rx_buffers(adpt->rx_rings[i], + NE6X_DESC_UNUSED(adpt->rx_rings[i])); + } + + return err; +} + +static void ne6x_napi_enable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_enable(&q_vector->napi); + } +} + +static int ne6x_up_complete(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_adpt_configure_msix(adpt); + + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); + ne6x_napi_enable_all(adpt); + ne6x_adpt_enable_irq(adpt); + + if ((adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) && adpt->netdev) { + ne6x_print_link_message(adpt, true); + netif_tx_start_all_queues(adpt->netdev); + netif_carrier_on(adpt->netdev); + } + + /* On the next run of the service_task, notify any clients of the new + * opened netdev + */ + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); + ne6x_linkscan_schedule(pf); + + return 0; +} + +static void ne6x_napi_disable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_disable(&q_vector->napi); + } +} + +static void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +void ne6x_clean_tx_ring(struct 
ne6x_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buf) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + ne6x_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_buf[i]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); +} + +void ne6x_clean_rx_ring(struct ne6x_ring *rx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buf) + return; + + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[i]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, rx_bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; +} + +static void ne6x_clean_cq_ring(struct ne6x_ring *cq_ring) +{ + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + + cq_ring->next_to_clean = 0; + cq_ring->next_to_use = 0; +} + +void ne6x_down(struct ne6x_adapter *adpt) +{ + int i; + + /* It is assumed that the caller of this function + * sets the adpt->comm.state NE6X_ADPT_DOWN bit. 
+ */ + if (adpt->netdev) { + netif_carrier_off(adpt->netdev); + netif_tx_disable(adpt->netdev); + } + + ne6x_adpt_disable_irq(adpt); + ne6x_adpt_restart_vp(adpt, true); + ne6x_force_link_state(adpt, false); + ne6x_napi_disable_all(adpt); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_clean_tx_ring(adpt->tx_rings[i]); + ne6x_clean_cq_ring(adpt->cq_rings[i]); + ne6x_clean_rx_ring(adpt->rx_rings[i]); + } +} + +void ne6x_free_rx_resources(struct ne6x_ring *rx_ring) +{ + ne6x_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_rx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (!adpt->rx_rings) + return; + + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->rx_rings[i] && adpt->rx_rings[i]->desc) + ne6x_free_rx_resources(adpt->rx_rings[i]); + } +} + +void ne6x_free_tx_resources(struct ne6x_ring *tx_ring) +{ + ne6x_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +void ne6x_free_cq_resources(struct ne6x_ring *cq_ring) +{ + ne6x_clean_cq_ring(cq_ring); + if (cq_ring->desc) { + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_tx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->tx_rings[i] && adpt->tx_rings[i]->desc) + ne6x_free_tx_resources(adpt->tx_rings[i]); + kfree(adpt->tx_rings[i]->sgl); + } + } + + if (adpt->cq_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->cq_rings[i] && adpt->cq_rings[i]->desc) + ne6x_free_cq_resources(adpt->cq_rings[i]); + } + } + + if (adpt->tg_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->tg_rings[i] && adpt->tg_rings[i]->desc) + /* tg_ring == cq_ring */ + ne6x_free_cq_resources(adpt->tg_rings[i]); + } + } +} + +int ne6x_up(struct ne6x_adapter *adpt) +{ + int err; + + ne6x_force_link_state(adpt, true); + + err = ne6x_adpt_configure(adpt); + if (!err) + err = ne6x_up_complete(adpt); + + return err; +} + +int ne6x_adpt_open(struct ne6x_adapter *adpt) +{ + char int_name[NE6X_INT_NAME_STR_LEN]; + struct ne6x_pf *pf = adpt->back; + int err; + + /* allocate descriptors */ + err = ne6x_adpt_setup_tx_resources(adpt); + if (err) + goto err_setup_tx; + + err = ne6x_adpt_setup_rx_resources(adpt); + if (err) + goto err_setup_rx; + + err = ne6x_adpt_configure(adpt); + if (err) + goto err_setup_rx; + + if (adpt->netdev) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), + adpt->netdev->name); + err = ne6x_adpt_request_irq(adpt, int_name); + if (err) + goto err_setup_rx; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + + /* When reducing the number of Tx queues, any pre-existing + * skbuffs might target a now removed queue. Older versions of + * the Linux kernel do not check for this, and it can result + * in a kernel panic. Avoid this by flushing all skbs now, so + * that we avoid attempting to transmit one that has an + * invalid queue mapping. 
+ */ + qdisc_reset_all_tx_gt(adpt->netdev, 0); + + err = netif_set_real_num_rx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + } else { + err = -EINVAL; + goto err_setup_rx; + } + + err = ne6x_up_complete(adpt); + if (err) + goto err_up_complete; + + ne6x_dev_set_tx_rx_state(adpt, true, true); + return 0; + +err_up_complete: + ne6x_down(adpt); +err_set_queues: + ne6x_adpt_free_irq(adpt); +err_setup_rx: + ne6x_adpt_free_rx_resources(adpt); +err_setup_tx: + ne6x_adpt_free_tx_resources(adpt); + + return err; +} + +int ne6x_open(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int err; + + netdev_info(netdev, "open !!!\n"); + set_bit(NE6X_ADPT_OPEN, adpt->comm.state); + + netif_carrier_off(netdev); + + if (ne6x_force_link_state(adpt, true)) + return -EAGAIN; + + err = ne6x_adpt_open(adpt); + if (err) + return err; + + ne6x_sync_features(netdev); + + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_UP); + + return 0; +} + +void ne6x_adpt_close(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_dev_set_tx_rx_state(adpt, false, false); + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_down(adpt); + + ne6x_adpt_free_irq(adpt); + ne6x_adpt_free_tx_resources(adpt); + ne6x_adpt_free_rx_resources(adpt); + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); +} + +int ne6x_close(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + clear_bit(NE6X_ADPT_OPEN, adpt->comm.state); + adpt->current_isup = false; + adpt->current_speed = NE6X_LINK_SPEED_UNKNOWN; + ne6x_adpt_close(adpt); + if (test_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags)) + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_DOWN); + + netdev_info(netdev, "close !!!\n"); + + return 0; +} + +static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + WARN_ON(in_interrupt()); + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) + usleep_range(1000, 2000); + + ne6x_down(adpt); + ne6x_up(adpt); + clear_bit(NE6X_CONFIG_BUSY, pf->state); +} + +static int ne6x_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + NE6X_PACKET_HDR_PAD; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (new_mtu < NE6X_MIN_MTU_SIZE) { + netdev_err(netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > NE6X_MAX_RXBUFFER) { + netdev_err(netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + if (adpt->back->num_alloc_vfs == 0) + ne6x_adpt_reinit_locked(adpt); + } + + return 0; +} + +static void ne6x_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue) +{ + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + unsigned int hung_queue = 0; + u64 head, intr, tail; + + hung_queue = txqueue; + tx_ring = adpt->tx_rings[hung_queue]; + pf->tx_timeout_count++; + + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; /* reset after some time */ + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ + + /* don't kick off another recovery if one is already pending */ + if 
(test_and_set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state)) + return; + + if (tx_ring) { + if (tx_ring->reg_idx < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_SQ_HD_POINTER)); + /* Read interrupt register */ + intr = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_VP_INT)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, + NE6X_SQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + intr = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + } + + netdev_info(netdev, "tx_timeout: adapter: %u, Q: %u, NTC: 0x%x, HEAD: 0x%llx, NTU: 0x%x, TAIL: 0x%llx, INTR: 0x%llx\n", + adpt->idx, hung_queue, tx_ring->next_to_clean, head, + tx_ring->next_to_use, tail, intr); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + set_bit(NE6X_PF_RESET_REQUESTED, pf->state); + set_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + break; + case 2: + set_bit(NE6X_CORE_RESET_REQUESTED, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); + set_bit(NE6X_DOWN_REQUESTED, pf->state); + set_bit(NE6X_ADPT_DOWN_REQUESTED, adpt->comm.state); + break; + } + + ne6x_service_event_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +static void ne6x_get_netdev_stats_struct_tx(struct ne6x_ring *ring, struct rtnl_link_stats64 *stats) +{ + u64 bytes, packets; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; +} + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt) +{ + return &adpt->net_stats; +} + +static void ne6x_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct rtnl_link_stats64 *adpt_stats = ne6x_get_adpt_stats_struct(adpt); + struct ne6x_ring *tx_ring, *rx_ring; + u64 bytes, packets; + unsigned int start; + int i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + if (!adpt->tx_rings) + return; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + tx_ring = READ_ONCE(adpt->tx_rings[i]); + if (!tx_ring) + continue; + + ne6x_get_netdev_stats_struct_tx(tx_ring, stats); + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + adpt_stats->rx_dropped = 0; + rcu_read_unlock(); + + /* following stats updated by ne6x_watchdog_subtask() */ + stats->multicast = adpt_stats->multicast; + stats->tx_errors = adpt_stats->tx_errors; + stats->tx_dropped = adpt_stats->tx_dropped; + stats->rx_errors = adpt_stats->rx_errors; + stats->rx_dropped = adpt_stats->rx_dropped; + stats->rx_crc_errors = adpt_stats->rx_crc_errors; + stats->rx_length_errors = 
adpt_stats->rx_length_errors; +} + +void ne6x_update_pf_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ons; + struct rtnl_link_stats64 *ns; /* netdev stats */ + struct ne6x_eth_stats *oes; + struct ne6x_eth_stats *es; /* device's eth stats */ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + u32 tx_restart, tx_busy; + u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; + struct vf_stat vf_stat; + u64 tx_linearize; + u64 tx_force_wb; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u64 tx_e, rx_e; + u64 rx_l, rx_c; + u16 i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + ons = &adpt->net_stats_offsets; + es = &adpt->eth_stats; + oes = &adpt->eth_stats_offsets; + + rx_p = 0; + rx_b = 0; + tx_p = 0; + tx_b = 0; + rx_e = 0; + tx_e = 0; + rx_c = 0; + rx_l = 0; + tx_force_wb = 0; + tx_linearize = 0; + tx_busy = 0; + tx_restart = 0; + rx_page = 0; + rx_buf = 0; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + /* locate Tx ring */ + tx_ring = READ_ONCE(adpt->tx_rings[i]); + + do { + start = u64_stats_fetch_begin(&tx_ring->syncp); + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); + + tx_b += bytes; + tx_p += packets; + tx_restart += tx_ring->tx_stats.restart_q; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_linearize += tx_ring->tx_stats.tx_linearize; + tx_e += tx_ring->tx_stats.csum_err + tx_ring->tx_stats.tx_drop_addr + + tx_ring->tx_stats.tx_pcie_read_err; + + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + rx_b += bytes; + rx_p += packets; + rx_buf += rx_ring->rx_stats.alloc_buf_failed; + rx_page += rx_ring->rx_stats.alloc_page_failed; + rx_e += rx_ring->rx_stats.csum_err + rx_ring->rx_stats.rx_err + + rx_ring->rx_stats.rx_mem_error; + rx_l += rx_ring->rx_stats.rx_mem_error; + } + + rcu_read_unlock(); + + adpt->tx_restart = tx_restart; + adpt->tx_busy = tx_busy; + adpt->rx_page_failed = rx_page; + adpt->rx_buf_failed = rx_buf; + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + ns->tx_errors = tx_e; + ns->rx_errors = rx_e; + ns->rx_length_errors = rx_l; + ns->rx_crc_errors = rx_c; + + ns->rx_dropped = 0; + ne6x_dev_get_vf_stat(adpt, &vf_stat); + es->rx_broadcast = vf_stat.rx_broadcast_pkts; + es->rx_miss = vf_stat.rx_drop_pkts; + es->rx_multicast = vf_stat.rx_multicast_pkts; + es->rx_unicast = vf_stat.rx_unicast_pkts; + es->tx_broadcast = vf_stat.tx_broadcast_pkts; + es->tx_multicast = vf_stat.tx_multicast_pkts; + es->tx_unicast = vf_stat.tx_unicast_pkts; + es->rx_malform = vf_stat.rx_malform_pkts; + es->tx_malform = vf_stat.tx_malform_pkts; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void ne6x_netpoll(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_msix_clean_rings(0, adpt->q_vectors[i]); +} +#endif + +static int ne6x_set_mac(struct net_device *netdev, void *p) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_mac_info *mac = &adpt->port_info->mac; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if 
(ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); + return 0; + } + + if (ether_addr_equal(mac->perm_addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", mac->perm_addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + + ne6x_adpt_del_mac(adpt, mac->perm_addr, true); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(mac->perm_addr, addr->sa_data, netdev->addr_len); + ne6x_adpt_add_mac(adpt, mac->perm_addr, true); + ne6x_dev_set_port_mac(adpt, mac->perm_addr); + + return 0; +} + +static int ne6x_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_add_vid proto = 0x%04X vid = %d\n", proto, vid); + + if (!vid) + return 0; + + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged + * packets aren't pruned by the device's internal switch on Rx + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + + if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + return -EINVAL; + } + + return ret; +} + +static int ne6x_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_add_vid proto = 0x%04X vid = %d\n", proto, vid); + + if (!vid) + return 0; + + /* Make sure VLAN delete is successful before updating VLAN + * information + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) + return ret; + + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + + return 0; +} + +static struct mac_addr_node *ne6x_find_addr(struct ne6x_adapter *adpt, + const u8 *macaddr, bool is_unicast) +{ + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + + if (!macaddr) + return NULL; + + if (is_unicast) + addr_head = &adpt->uc_mac_addr; + else + addr_head = &adpt->mc_mac_addr; + + list_for_each_entry(addr_node, &addr_head->list, list) { + if (ether_addr_equal(macaddr, addr_node->addr)) + return addr_node; + } + + return NULL; +} + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + int rc = 0; + + if (!addr) + return -EINVAL; + + if (is_unicast) { + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_multicast; + } + + mutex_lock(&addr_head->mutex); + + if (ne6x_find_addr(adpt, addr, is_unicast)) + goto out_unlock; + + /* Update MAC list value */ + addr_node = kzalloc(sizeof(*addr_node), GFP_KERNEL); + if (!addr_node) { + rc = -ENOMEM; + goto out_unlock; + } + + ether_addr_copy(addr_node->addr, addr); + list_add_tail(&addr_node->list, &addr_head->list); + /* Send the value of the updated MAC linked list to the SDK */ + ne6x_vc_cfg_mac(adpt, addr_node->addr); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return rc; +} + +int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + 
struct mac_addr_node *addr_node = NULL; + + if (is_unicast) { + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_multicast; + } + + mutex_lock(&addr_head->mutex); + addr_node = ne6x_find_addr(adpt, addr, is_unicast); + if (!addr_node) + goto out_unlock; + + list_del(&addr_node->list); + ne6x_vc_cfg_mac(adpt, addr_node->addr); + kfree(addr_node); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return 0; +} + +static int ne6x_mc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, false); +} + +static int ne6x_mc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, false); +} + +static int ne6x_uc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, true); +} + +static int ne6x_uc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, true); +} + +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf) +{ + u32 data; + + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); +} + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt) +{ + struct mac_addr_node *temp_node = NULL, *addr_node = NULL; + struct ne6x_vlan_filter *f = NULL, *temp_filter = NULL; + struct mac_addr_head *addr_head = NULL; + struct list_head temp_header; + int ret = 0; + + INIT_LIST_HEAD(&temp_header); + spin_lock_bh(&adpt->mac_vlan_list_lock); + list_for_each_entry(f, &adpt->vlan_filter_list, list) { + if (f->vlan.vid) { + temp_filter = kzalloc(sizeof(*temp_filter), GFP_ATOMIC); + memcpy(temp_filter, f, sizeof(struct ne6x_vlan_filter)); + list_add_tail(&temp_filter->list, &temp_header); + } + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + list_for_each_entry_safe(f, temp_filter, &temp_header, list) { + if (f->vlan.vid) + ret |= ne6x_adpt_del_vlan(adpt, f->vlan); + + list_del(&f->list); + kfree(f); + } + + addr_head = &adpt->uc_mac_addr; + mutex_lock(&addr_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) { + ret |= ne6x_dev_del_unicast(adpt, addr_node->addr); + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&addr_head->mutex); + + addr_head = &adpt->mc_mac_addr; + mutex_lock(&addr_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) { + ret |= ne6x_dev_del_multicast(adpt, addr_node->addr); + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&addr_head->mutex); + + return ret; +} + +static void ne6x_set_rx_mode_task(struct work_struct *work) +{ + struct ne6x_adapter *adpt = container_of(work, struct ne6x_adapter, set_rx_mode_task); + struct net_device *netdev = adpt->netdev; + + /* Check for Promiscuous modes */ + if (netdev->flags & IFF_PROMISC) { + ne6x_dev_set_uc_promiscuous_enable(adpt, true); + ne6x_dev_set_mc_promiscuous_enable(adpt, true); + } else { + ne6x_dev_set_uc_promiscuous_enable(adpt, false); + ne6x_dev_set_mc_promiscuous_enable(adpt, false); + /* Check for All Multicast modes */ + if (netdev->flags & IFF_ALLMULTI) + ne6x_dev_set_mc_promiscuous_enable(adpt, true); + else + __dev_mc_sync(netdev, 
ne6x_mc_addr_sync, ne6x_mc_addr_unsync); + } + + __dev_uc_sync(netdev, ne6x_uc_addr_sync, ne6x_uc_addr_unsync); +} + +static void ne6x_set_rx_mode(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!adpt) + return; + + queue_work(ne6x_wq, &adpt->set_rx_mode_task); +} + +static int ne6x_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!adpt) + return -1; + + return 0; +} + +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +#define NETIF_UDP_TNL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static netdev_features_t ne6x_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (features & NETIF_VLAN_FILTERING_FEATURES) + features |= NETIF_VLAN_FILTERING_FEATURES; + + return features; +} + +static int ne6x_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 value; + + value = ne6x_dev_get_features(adpt); + + if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + value |= NE6X_F_TX_UDP_TNL_SEG; + else + value &= ~NE6X_F_TX_UDP_TNL_SEG; + } + + if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) { + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. 
+ */ + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + value |= NE6X_F_RX_VLAN_STRIP; + else + value &= ~NE6X_F_RX_VLAN_STRIP; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + value |= NE6X_F_TX_VLAN; + else + value &= ~NE6X_F_TX_VLAN; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + value |= NE6X_F_RX_QINQ_STRIP; + else + value &= ~NE6X_F_RX_QINQ_STRIP; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + value |= NE6X_F_TX_QINQ; + else + value &= ~NE6X_F_TX_QINQ; + + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + value |= NE6X_F_RX_VLAN_FILTER; + else + value &= ~NE6X_F_RX_VLAN_FILTER; + } + + if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) { + if (features & NETIF_F_RXCSUM) + value |= NE6X_OFFLOAD_RXCSUM; + else + value &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (features & NETIF_F_LRO) + value |= NE6X_OFFLOAD_LRO; + else + value &= ~NE6X_OFFLOAD_LRO; + } + + if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + value |= NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + } + + if (changed & NETIF_F_GSO_UDP) { + if (features & NETIF_F_GSO_UDP) + value |= NE6X_OFFLOAD_UFO; + else + value &= ~NE6X_OFFLOAD_UFO; + } + + if (changed & NETIF_F_IP_CSUM) { + if (features & NETIF_F_IP_CSUM) + value |= NE6X_OFFLOAD_TXCSUM; + else + value &= ~NE6X_OFFLOAD_TXCSUM; + } + + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + value |= NE6X_OFFLOAD_RSS; + else + value &= ~NE6X_OFFLOAD_RSS; + } + + if (changed & NETIF_F_HW_L2FW_DOFFLOAD) { + if (features & NETIF_F_HW_L2FW_DOFFLOAD) + value |= NE6X_OFFLOAD_L2; + else + value &= ~NE6X_OFFLOAD_L2; + } + + if (changed & NETIF_F_SCTP_CRC) { + if (features & NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + } + + if (changed & NETIF_F_NTUPLE) { + if (features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + } + return ne6x_dev_set_features(adpt, value); +} + +static netdev_features_t ne6x_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO.
+ */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +int ne6x_link_speed_to_rate(int link_speed) +{ + switch (link_speed) { + case NE6X_LINK_SPEED_100GB: + return SPEED_100000; + case NE6X_LINK_SPEED_40GB: + return SPEED_40000; + case NE6X_LINK_SPEED_25GB: + return SPEED_25000; + case NE6X_LINK_SPEED_10GB: + return SPEED_10000; + default: + return SPEED_25000; + } +} + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int min_tx_rate, int max_tx_rate) +{ + if (!adpt) + return -EINVAL; + + if (min_tx_rate) { + dev_err(&adpt->back->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", + min_tx_rate, vf_id); + return -EINVAL; + } + + if (max_tx_rate > ne6x_link_speed_to_rate(adpt->port_info->phy.link_info.link_speed)) { + dev_err(&adpt->back->pdev->dev, "Invalid max tx rate (%d) (greater than link_speed) specified for VF %d.\n", + max_tx_rate, vf_id); + return -EINVAL; + } + + return 0; +} + +static struct ne6x_key_filter *ne6x_find_key(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + list_for_each_entry(f, &pf->key_filter_list, list) { + if (f->key.pi == key.pi && ether_addr_equal(f->key.mac_addr, key.mac_addr)) + return f; + } + + return NULL; +} + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f = NULL; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->key = key; + + list_add_tail(&f->list, &pf->key_filter_list); + f->add = true; + } else { + f->refcnt++; + } + +clearout: + spin_unlock_bh(&pf->key_list_lock); + + return f; +} + +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (f) { + if (f->refcnt) { + f->refcnt--; + spin_unlock_bh(&pf->key_list_lock); + return -1; + } + + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&pf->key_list_lock); + + return 0; +} + +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key_filter *f; + struct ne6x_key key; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + memcpy(key.mac_addr, mac_addr, size); + + f = ne6x_add_key_list(adpt->back, key); + if (f->refcnt) + return -1; + + return 0; +} + +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key key; + int ret; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + memcpy(key.mac_addr, mac_addr, size); + + ret = ne6x_del_key_list(adpt->back, key); + if (ret) + return -1; + + return 0; +} + +static struct ne6x_vlan_filter *ne6x_find_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f; + + list_for_each_entry(f, &adpt->vlan_filter_list, list) { + if (f->vlan.vid == vlan.vid && f->vlan.tpid == 
vlan.tpid) + return f; + } + + return NULL; +} + +struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f = NULL; + + spin_lock_bh(&adpt->mac_vlan_list_lock); + + f = ne6x_find_vlan(adpt, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adpt->vlan_filter_list); + f->add = true; + } else { + f->refcnt++; + } + +clearout: + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + return f; +} + +int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f; + + spin_lock_bh(&adpt->mac_vlan_list_lock); + + f = ne6x_find_vlan(adpt, vlan); + if (f) { + if (f->refcnt) { + f->refcnt--; + spin_unlock_bh(&adpt->mac_vlan_list_lock); + return -1; + } + + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + return 0; +} + +int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + struct ne6x_vlan_filter *f = ne6x_add_vlan_list(adpt, vlan); + + if (f->refcnt == 0) + ne6x_dev_vlan_add(adpt, &vlan); + + return 0; +} + +int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan) +{ + int ret; + + ret = ne6x_del_vlan_list(adpt, vlan); + if (ret == 0) + ne6x_dev_vlan_del(adpt, &vlan); + + return 0; +} + +int ne6x_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, + u8 qos, __be16 vlan_proto) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + u16 local_vlan_proto = ntohs(vlan_proto); + u16 vid_temp = 0, tpid_temp = 0; + struct ne6x_vlan vlan; + struct ne6x_adapter *adpt; + struct device *dev; + struct ne6x_vf *vf; + int lport; + + dev = ne6x_pf_to_dev(pf); + + if (vf_id < 0 || vf_id >= pf->num_alloc_vfs / 2 || vlan_id >= (VLAN_N_VID - 1) || qos > 7) { + dev_err(dev, "Invalid Port VLAN parameters for VF %d,vlan ID %d, QoS %d\n", + vf_id, vlan_id, qos); + return -EINVAL; + } + + if (!ne6x_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { + dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", + local_vlan_proto); + return -EPROTONOSUPPORT; + } + + lport = ADPT_LPORT(np->adpt); + vf_id += (pf->num_alloc_vfs / 2) * lport; + + vf = ne6x_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + vf->port_vlan_info = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + if (vf->port_vlan_info.prio || vf->port_vlan_info.vid) + dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", + vlan_id, qos, local_vlan_proto, vf_id); + else + dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); + + adpt = vf->adpt; + + dev_info(dev, "%s: net_name:%s TPID:%08x vlan_id:%d qos:%d lport:%d vport:%d vlan_id:%d tpid:%04x %d\n", + __func__, netdev->name, local_vlan_proto, vlan_id, qos, ADPT_LPORT(adpt), + ADPT_VPORT(adpt), vf->port_vlan_info.vid, vf->port_vlan_info.tpid, vf->vfp_vid); + + vlan = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + + if (vlan.vid == 0) { + if (vf->vfp_tpid == vlan.tpid) { + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + vf->vfp_vid = 0; + vf->vfp_tpid = 0; + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } else { + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + vf->vfp_vid = 0; + vf->vfp_tpid = 0; + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } + + } else if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + vid_temp = vlan.vid; + tpid_temp = vlan.tpid; + vlan.vid = 
vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + + if (vf->vfp_vid == vid_temp) { + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } + + vlan.vid = vid_temp; + vlan.tpid = tpid_temp; + vid_temp = (qos << VLAN_PRIO_SHIFT) | (vlan.vid & VLAN_VID_MASK); + vf->vfp_vid = vf->port_vlan_info.vid; + vf->vfp_tpid = vf->port_vlan_info.tpid; + ne6x_dev_add_vf_qinq(vf, tpid_temp, vid_temp); + ne6x_adpt_add_vlan(vf->adpt, vlan); + } else { + return -EINVAL; + } + + return 0; +} + +static void *ne6x_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(ne6x_pf_to_dev(adpt->back), sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + ne6x_adpt_add_mac(adpt, mac, true); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adpt->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +static void ne6x_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!accel_priv) + return; + + ne6x_adpt_del_mac(adpt, mv->mac, true); + list_del(&mv->list); + /* log before mv is freed */ + netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name); + + devm_kfree(ne6x_pf_to_dev(adpt->back), mv); +} + +static const struct net_device_ops ne6x_netdev_ops = { + .ndo_open = ne6x_open, + .ndo_stop = ne6x_close, + .ndo_start_xmit = ne6x_lan_xmit_frame, + .ndo_get_stats64 = ne6x_get_netdev_stats_struct, + .ndo_set_rx_mode = ne6x_set_rx_mode, + .ndo_set_mac_address = ne6x_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ne6x_change_mtu, + .ndo_tx_timeout = ne6x_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ne6x_netpoll, +#endif + .ndo_set_vf_rate = ne6x_ndo_set_vf_bw, + .ndo_set_tx_maxrate = ne6x_set_tx_maxrate, + .ndo_set_vf_mac = ne6x_set_vf_mac, + .ndo_get_vf_config = ne6x_get_vf_config, + .ndo_set_vf_trust = ne6x_set_vf_trust, + .ndo_set_vf_vlan = ne6x_set_vf_port_vlan, + .ndo_set_vf_link_state = ne6x_set_vf_link_state, + .ndo_vlan_rx_add_vid = ne6x_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ne6x_vlan_rx_kill_vid, + .ndo_set_features = ne6x_set_features, + .ndo_features_check = ne6x_features_check, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = ne6x_rx_flow_steer, +#endif + .ndo_dfwd_add_station = ne6x_fwd_add_macvlan, + .ndo_dfwd_del_station = ne6x_fwd_del_macvlan, + .ndo_fix_features = ne6x_fix_features, +}; + +void ne6x_sync_features(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 value; + + value = ne6x_dev_get_features(adpt); + + if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + value |= NE6X_F_TX_UDP_TNL_SEG; + else + value &= ~NE6X_F_TX_UDP_TNL_SEG; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + value |= NE6X_F_RX_VLAN_STRIP; + else + value &= ~NE6X_F_RX_VLAN_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + value |= NE6X_F_TX_VLAN; + else + value &= ~NE6X_F_TX_VLAN; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) + value |= NE6X_F_RX_QINQ_STRIP; + else + value &= ~NE6X_F_RX_QINQ_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_TX) + value |= NE6X_F_TX_QINQ; + else + value
&= ~NE6X_F_TX_QINQ; + + if (netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + value |= NE6X_F_RX_VLAN_FILTER; + else + value &= ~NE6X_F_RX_VLAN_FILTER; + + if (netdev->features & NETIF_F_RXCSUM) + value |= NE6X_OFFLOAD_RXCSUM; + else + value &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (netdev->features & NETIF_F_LRO) + value |= NE6X_OFFLOAD_LRO; + else + value &= ~NE6X_OFFLOAD_LRO; + + if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) + value |= NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + + if (netdev->features & NETIF_F_GSO_UDP) + value |= NE6X_OFFLOAD_UFO; + else + value &= ~NE6X_OFFLOAD_UFO; + + if (netdev->features & NETIF_F_IP_CSUM) + value |= NE6X_OFFLOAD_TXCSUM; + else + value &= ~NE6X_OFFLOAD_TXCSUM; + + if (netdev->features & NETIF_F_RXHASH) + value |= NE6X_OFFLOAD_RSS; + else + value &= ~NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD) + value |= NE6X_OFFLOAD_L2; + else + value &= ~NE6X_OFFLOAD_L2; + + if (netdev->features & NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + + if (netdev->features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + + ne6x_dev_set_features(adpt, value); +} + +static void ne6x_set_netdev_features(struct net_device *netdev) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + netdev_features_t vlano_features = 0u; + netdev_features_t csumo_features; + netdev_features_t dflt_features; + netdev_features_t tso_features; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_NTUPLE | + NETIF_F_RXHASH; + + csumo_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_LRO | + NETIF_F_LOOPBACK | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_L4 | + NETIF_F_GSO_SCTP | + 0; + + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + + /* set features that user can change */ + netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = NETIF_F_HW_CSUM; + + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* enable features */ + netdev->features |= netdev->hw_features; + /* encap and VLAN devices inherit default, csumo and tso features */ + netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; + netdev->vlan_features |= dflt_features | csumo_features | tso_features; + netdev->hw_features |= NETIF_F_HW_TC; + pf->hw.dvm_ena = 0x1; + + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_FILTER; +} + +static int ne6x_config_netdev(struct ne6x_adapter *adpt) +{ + struct ne6x_rss_info *rss_info = &adpt->rss_info; + struct ne6x_pf *pf = adpt->back; + struct ne6x_netdev_priv *np; + struct net_device *netdev; + char name[IFNAMSIZ] = {0}; + int etherdev_size, index; + u8 mac_addr[ETH_ALEN]; + + if (pf->hw.bus.domain_num) + sprintf(name, "enP%dp%ds0f%d", + pf->hw.bus.domain_num, pf->hw.bus.bus_num, adpt->idx); + else + sprintf(name, "enp%ds0f%d", pf->hw.bus.bus_num, adpt->idx); + + etherdev_size = 
sizeof(struct ne6x_netdev_priv); + + netdev = alloc_netdev_mq(etherdev_size, name, NET_NAME_USER, ether_setup, adpt->num_queue); + if (!netdev) + return -ENOMEM; + + adpt->netdev = netdev; + np = netdev_priv(netdev); + np->adpt = adpt; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + netdev_rss_key_fill(rss_info->hash_key, sizeof(rss_info->hash_key)); + + for (index = 0; index < rss_info->ind_table_size; index++) + rss_info->ind_table[index] = ethtool_rxfh_indir_default(index, adpt->num_queue); + + ne6x_dev_set_rss(adpt, rss_info); /* end rss info */ + + ne6x_set_netdev_features(netdev); + + SET_NETDEV_DEV(netdev, &pf->pdev->dev); + ether_addr_copy(mac_addr, adpt->port_info->mac.perm_addr); + eth_hw_addr_set(netdev, mac_addr); + ether_addr_copy(netdev->perm_addr, mac_addr); + + netdev->netdev_ops = &ne6x_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + ne6x_set_ethtool_ops(netdev); + +/* MTU range: 128 - 15342 */ + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - NE6X_PACKET_HDR_PAD - ETH_FCS_LEN; + netdev->gso_max_size = 65535; + netdev->needed_headroom = 32; + netdev->needed_tailroom = 32; + ne6x_dev_set_mtu(adpt, netdev->mtu); + ne6x_sync_features(netdev); + + return 0; +} + +static void ne6x_map_vector_to_qp(struct ne6x_adapter *adpt, int v_idx, int qp_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = adpt->tx_rings[qp_idx]; + struct ne6x_ring *rx_ring = adpt->rx_rings[qp_idx]; + struct ne6x_ring *cq_ring = adpt->cq_rings[qp_idx]; + struct ne6x_ring *tg_ring = adpt->tg_rings[qp_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; + tg_ring->q_vector = q_vector; + tg_ring->next = q_vector->cq.ring; + q_vector->tg.ring = tg_ring; + q_vector->tg.count++; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt) +{ + int q_vectors = adpt->num_q_vectors; + int qp_remaining = adpt->num_queue; + struct ne6x_q_vector *q_vector; + int num_ringpairs; + int v_start = 0; + int qp_idx = 0; + + /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to + * group them so there are multiple queues per vector. + * It is also important to go through all the vectors available to be + * sure that if we don't use all the vectors, that the remaining vectors + * are cleared. This is especially important when decreasing the + * number of queues in use. 
+ */ + for (; v_start < q_vectors; v_start++) { + q_vector = adpt->q_vectors[v_start]; + + num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + + q_vector->num_ringpairs = num_ringpairs; + q_vector->reg_idx = q_vector->v_idx + adpt->base_vector; + + q_vector->rx.count = 0; + q_vector->tx.count = 0; + q_vector->cq.count = 0; + q_vector->tg.count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + q_vector->cq.ring = NULL; + q_vector->tg.ring = NULL; + + while (num_ringpairs--) { + ne6x_map_vector_to_qp(adpt, v_start, qp_idx); + qp_idx++; + qp_remaining--; + } + } +} + +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ns; + int i; + + if (!adpt) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + memset(ns, 0, sizeof(*ns)); + memset(&adpt->net_stats_offsets, 0, sizeof(adpt->net_stats_offsets)); + memset(&adpt->eth_stats, 0, sizeof(adpt->eth_stats)); + memset(&adpt->eth_stats_offsets, 0, sizeof(adpt->eth_stats_offsets)); + + if (adpt->rx_rings && adpt->rx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + memset(&adpt->rx_rings[i]->stats, 0, + sizeof(adpt->rx_rings[i]->stats)); + memset(&adpt->rx_rings[i]->rx_stats, 0, + sizeof(adpt->rx_rings[i]->rx_stats)); + memset(&adpt->rx_rings[i]->cq_stats, 0, + sizeof(adpt->rx_rings[i]->cq_stats)); + memset(&adpt->tx_rings[i]->stats, 0, + sizeof(adpt->tx_rings[i]->stats)); + memset(&adpt->tx_rings[i]->tx_stats, 0, + sizeof(adpt->tx_rings[i]->tx_stats)); + } + } +} + +static int ne6x_adpt_setup(struct ne6x_pf *pf) +{ + struct ne6x_adapter *adpt = NULL; + u32 is_write_proterct = false; + struct ne6x_hw *hw = &pf->hw; + int i, ret = 0; + u32 value; + + /* PF + VP */ + pf->adpt = kcalloc(NE6X_MAX_VP_NUM + 4, sizeof(*pf->adpt), GFP_KERNEL); + if (!pf->adpt) + return -ENOMEM; + + ne6x_dev_get_norflash_write_protect(pf, &is_write_proterct); + + /* Need to protect the allocation of the adapters at the PF level */ + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + struct ne6x_vlan vlan = {0}; + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + adpt->back = pf; + pf->adpt[i] = adpt; + adpt->idx = i; + adpt->vport = NE6X_PF_VP0_NUM + i; /*vport*/ + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + value = ne6x_dev_get_features(adpt); + if (value & NE6X_F_RX_FW_LLDP) + clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + else + set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + + clear_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags); + clear_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags); + clear_bit(NE6X_ADPT_F_ACL, adpt->flags); + + if (is_write_proterct) + set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + else + clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + + INIT_WORK(&adpt->set_rx_mode_task, ne6x_set_rx_mode_task); + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + INIT_LIST_HEAD(&adpt->macvlan_list); + init_waitqueue_head(&adpt->recv_notify); + + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) { + ret = -ENOMEM; + goto err_portinfo; + } + + adpt->port_info->lport = i; /* logical port */ + adpt->port_info->hw_trunk_id = i; + adpt->port_info->hw_port_id = ne6x_dev_get_pport(adpt); + 
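+		/* queue layout for this logical port: hw_queue_base below advances by pf->hw.expect_vp per port, and hw_max_queue mirrors pf->hw.max_queue */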
adpt->port_info->queue = pf->hw.max_queue; + adpt->port_info->hw_max_queue = adpt->port_info->queue; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * i; + adpt->comm.port_info = adpt->port_info->lport | (adpt->vport << 8); + adpt->port_info->hw = hw; + adpt->port_info->phy.curr_user_speed_req = 0x0; + + ne6x_dev_get_mac_addr(adpt, adpt->port_info->mac.perm_addr); + ne6x_set_num_rings_in_adpt(adpt); + + ret = ne6x_adpt_mem_alloc(pf, adpt); + if (ret) + goto err_netdev; + + ret = ne6x_config_netdev(adpt); + if (ret) + goto err_configdev; + + /* The unicast MAC address delivers the SDK */ + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(adpt, vlan); + ne6x_adpt_add_mac(adpt, adpt->port_info->mac.perm_addr, true); + ne6x_dev_add_broadcast_leaf(adpt); + + /* set up vectors and rings if needed */ + ret = ne6x_adpt_setup_vectors(adpt); + if (ret) + goto err_msix; + + ret = ne6x_alloc_rings(adpt); + if (ret) + goto err_rings; + + ne6x_init_arfs(adpt); + + ret = ne6x_set_cpu_rx_rmap(adpt); + if (ret) + netdev_info(adpt->netdev, "adpt rx rmap err: %d", ret); + + /* map all of the rings to the q_vectors */ + ne6x_adpt_map_rings_to_vectors(adpt); + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_port2pi(adpt); + ne6x_dev_set_pi2port(adpt); + ne6x_dev_set_vport(adpt); + ne6x_dev_set_rss(adpt, &adpt->rss_info); + } + + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + adpt = pf->adpt[i]; + ret = ne6x_adpt_register_netdev(adpt); + if (ret) + goto err_configdev; + + adpt->netdev_registered = true; + netif_carrier_off(adpt->netdev); + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(adpt->netdev); + } + + return ret; + +err_rings: + ne6x_adpt_free_q_vectors(adpt); +err_msix: + if (adpt->netdev_registered) { + adpt->netdev_registered = false; + unregister_netdev(adpt->netdev); + free_netdev(adpt->netdev); + adpt->netdev = NULL; + } +err_configdev: + kfree(adpt->tx_rings); + kfree(adpt->q_vectors); +err_netdev: + kfree(adpt->port_info); +err_portinfo: + kfree(adpt); + + return ret; +} + +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt) +{ + int ret; + + ret = register_netdev(adpt->netdev); + if (ret) { + struct net_device *device = adpt->netdev; + struct ne6x_pf *pf = adpt->back; + char name[IFNAMSIZ] = {0}; + + sprintf(name, "enp%ds0f%%d", pf->hw.bus.bus_num); + strcpy(device->name, name); + return register_netdev(adpt->netdev); + } + + return ret; +} + +void ne6x_adjust_adpt_port_max_queue(struct ne6x_pf *pf) +{ + int cpu_num = num_online_cpus(); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + pf->hw.expect_vp = pf->irq_pile->num_entries / pf->hw.pf_port; + /* actal max vp queue */ + pf->hw.max_queue = min_t(int, cpu_num, pf->hw.expect_vp); + dev_info(&pf->pdev->dev, "%s:hw->expect_vp = %d hw->max_queue = %d cpu_num = %d\n", + __func__, pf->hw.expect_vp, pf->hw.max_queue, cpu_num); + } +} + +static int ne6x_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ne6x_pf *pf; + struct ne6x_hw *hw; + u32 ioremap_len; + int err; + + if (PCI_FUNC(pdev->devfn) != 1) + return 0; + + /* initialize device for use with memory space */ + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + /* set up pci connections */ + err = pci_request_mem_regions(pdev, ne6x_driver_name); + if 
(err) { + dev_info(&pdev->dev, "pci_request_mem_regions failed %d\n", err); + goto err_pci_reg; + } + pci_set_master(pdev); + /* Now that we have a PCI connection, we need to do the + * low level device setup. This is primarily setting up + * the Admin Queue structures and then querying for the + * device's current profile information. + */ + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_pf_alloc; + } + pf->next_adpt = 0; + pf->pdev = pdev; + pci_set_drvdata(pdev, pf); + set_bit(NE6X_DOWN, pf->state); + + hw = &pf->hw; + hw->back = pf; + + ioremap_len = pci_resource_len(pdev, 0); + hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), ioremap_len); + if (!hw->hw_addr0) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar0 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 0), ioremap_len, err); + goto err_ioremap_hw_addr0; + } + + ioremap_len = pci_resource_len(pdev, 2); + hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), ioremap_len); + if (!hw->hw_addr2) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar2 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 2), ioremap_len, err); + goto err_ioremap_hw_addr2; + } + + ioremap_len = pci_resource_len(pdev, 4); + hw->hw_addr4 = ioremap(pci_resource_start(pdev, 4), ioremap_len); + if (!hw->hw_addr4) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar4 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 4), ioremap_len, err); + goto err_ioremap_hw_addr4; + } + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + hw->bus.domain_num = pci_domain_nr(pdev->bus); + hw->bus.bus_num = pdev->bus->number; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + + usleep_range(10, 20); + + mutex_init(&pf->mbus_comm_mutex); + if (ne6x_dev_init(pf)) { + err = -EIO; + dev_info(&pdev->dev, "sdk init failed!\n"); + goto error_sdk_init_failed; + } + usleep_range(10, 20); + + pci_save_state(pdev); + + /* hardware resource initialization */ + err = ne6x_hw_init(hw); + if (err) + goto err_unroll_alloc; + + /* driver private resource initialization */ + err = ne6x_pf_init(pf); + if (err) + goto err_pf_reset; + + /* interrupt resource initialization */ + err = ne6x_init_interrupt_scheme(pf); + if (err) + goto err_interrupt_scheme; + + ne6x_adjust_adpt_port_max_queue(pf); + + err = ne6x_adpt_setup(pf); + if (err) + goto err_adpts; + + ne6x_dev_set_nic_start(pf, 0); + add_timer(&pf->linkscan_tmr); + ne6x_enable_link_irq(pf); + pcie_print_link_status(pdev); + /* ready to go, so clear down state bit */ + clear_bit(NE6X_DOWN, pf->state); + return 0; + +err_adpts: + set_bit(NE6X_DOWN, pf->state); + ne6x_clear_interrupt_scheme(pf); +err_interrupt_scheme: + del_timer_sync(&pf->serv_tmr); +err_pf_reset: + devm_kfree(ne6x_hw_to_dev(hw), hw->port_info); + hw->port_info = NULL; +err_unroll_alloc: +error_sdk_init_failed: + iounmap(hw->hw_addr4); +err_ioremap_hw_addr4: + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; +err_ioremap_hw_addr2: + iounmap(hw->hw_addr0); +err_ioremap_hw_addr0: + kfree(pf); +err_pf_alloc: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors) +{ + /* free the ring and vector containers */ + if (free_qvectors) { + kfree(adpt->q_vectors); + 
adpt->q_vectors = NULL; + } + + kfree(adpt->tx_rings); + adpt->tx_rings = NULL; + adpt->rx_rings = NULL; + adpt->cq_rings = NULL; +} + +static int ne6x_adpt_clear(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf; + + if (!adpt) + return 0; + + if (!adpt->back) + goto free_adpt; + + pf = adpt->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->adpt[adpt->idx]) { + dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n", + adpt->idx, adpt->idx, adpt->type); + goto unlock_adpt; + } + + if (pf->adpt[adpt->idx] != adpt) { + dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n", + pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type); + goto unlock_adpt; + } + + /* updates the PF for this cleared adpt */ + ne6x_adpt_free_arrays(adpt, true); + + pf->adpt[adpt->idx] = NULL; + if (adpt->idx < pf->next_adpt) + pf->next_adpt = adpt->idx; + +unlock_adpt: + mutex_unlock(&pf->switch_mutex); +free_adpt: + kfree(adpt); + + return 0; +} + +int ne6x_adpt_release(struct ne6x_adapter *adpt) +{ + struct mac_addr_head *mc_head = &adpt->mc_mac_addr; + struct mac_addr_head *uc_head = &adpt->uc_mac_addr; + struct mac_addr_node *temp_node, *addr_node; + struct ne6x_vlan_filter *vlf, *vlftmp; + struct ne6x_key_filter *klf, *klftmp; + struct ne6x_macvlan *mv, *mv_tmp; + struct ne6x_pf *pf = adpt->back; + + if (!test_bit(NE6X_DOWN, pf->state)) { + dev_info(&pf->pdev->dev, "Can't remove PF adapter\n"); + return -ENODEV; + } + + set_bit(NE6X_ADPT_RELEASING, adpt->comm.state); + + ne6x_remove_arfs(adpt); + ne6x_adpt_clear_ddos(pf); + ne6x_adpt_clear_mac_vlan(adpt); + ne6x_dev_del_broadcast_leaf(adpt); + /* release adpt multicast addr list resource */ + mutex_lock(&mc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&mc_head->mutex); + + /* release adpt unicast addr list resource */ + mutex_lock(&uc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&uc_head->mutex); + + spin_lock_bh(&adpt->mac_vlan_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + spin_lock_bh(&adpt->back->key_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(klf, klftmp, &adpt->back->key_filter_list, list) { + list_del(&klf->list); + kfree(klf); + } + spin_unlock_bh(&adpt->back->key_list_lock); + + list_for_each_entry_safe(mv, mv_tmp, &adpt->macvlan_list, list) + ne6x_fwd_del_macvlan(adpt->netdev, mv); + + if (adpt->netdev_registered) { + adpt->netdev_registered = false; + if (adpt->netdev) + /* results in a call to i40e_close() */ + unregister_netdev(adpt->netdev); + } + + ne6x_free_cpu_rx_rmap(adpt); + ne6x_adpt_disable_irq(adpt); + + /* clear the sync flag on all filters */ + if (adpt->netdev) { + __dev_uc_unsync(adpt->netdev, NULL); + __dev_mc_unsync(adpt->netdev, NULL); + } + + ne6x_adpt_free_q_vectors(adpt); + if (adpt->netdev) { + free_netdev(adpt->netdev); + adpt->netdev = NULL; + } + + /*add for lldp*/ + ne6x_dev_set_fw_lldp(adpt, false); + ne6x_adpt_clear_rings(adpt); + ne6x_adpt_clear(adpt); + + return 0; +} + +static void ne6x_remove(struct pci_dev *pdev) +{ + struct ne6x_pf *pf = pci_get_drvdata(pdev); + struct ne6x_hw *hw = &pf->hw; + int i; + + if 
(PCI_FUNC(pdev->devfn) != 1) + return; + + ne6x_proc_pf_exit(pf); + ne6x_dbg_pf_exit(pf); + + ne6x_dev_set_nic_stop(pf, 0); + +#ifdef CONFIG_PCI_IOV + if (pf->num_alloc_vfs) { + set_bit(NE6X_REMOVE, pf->state); + ne6x_sriov_configure(pdev, 0); + } +#endif + + /* no more scheduling of any task */ + set_bit(NE6X_DOWN, pf->state); + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + if (pf->linkscan_tmr.function) + del_timer_sync(&pf->linkscan_tmr); + + if (pf->linkscan_work.func) + cancel_work_sync(&pf->linkscan_work); + + /* Now we can shutdown the PF's adapter, just before we kill + * adminq and hmc. + */ + for (i = 0; i < pf->num_alloc_adpt; i++) + ne6x_adpt_release(pf->adpt[i]); + + /* Clear all dynamic memory lists of rings, q_vectors, and adapters */ + rtnl_lock(); + ne6x_clear_interrupt_scheme(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) { + ne6x_adpt_clear_rings(pf->adpt[i]); + ne6x_adpt_clear(pf->adpt[i]); + pf->adpt[i] = NULL; + } + } + rtnl_unlock(); + + kfree(pf->adpt); + + iounmap(hw->hw_addr4); + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; + iounmap(hw->hw_addr0); + kfree(pf); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver ne6x_driver = { + .name = ne6x_driver_name, + .id_table = ne6x_pci_tbl, + .probe = ne6x_probe, + .remove = ne6x_remove, + .sriov_configure = ne6x_sriov_configure, +}; + +int __init ne6x_init_module(void) +{ + pr_info("%s: %s - version %s\n", ne6x_driver_name, ne6x_driver_string, + ne6x_driver_version_str); + pr_info("%s: %s\n", ne6x_driver_name, ne6x_copyright); + + ne6x_wq = create_singlethread_workqueue(ne6x_driver_name); + if (!ne6x_wq) { + pr_err("%s: Failed to create workqueue\n", ne6x_driver_name); + return -ENOMEM; + } + + ne6x_dbg_init(); + ne6x_proc_init(); + ne6x_netlink_init(); + + return pci_register_driver(&ne6x_driver); +} + +module_init(ne6x_init_module); + +void __exit ne6x_exit_module(void) +{ + pci_unregister_driver(&ne6x_driver); + destroy_workqueue(ne6x_wq); + ne6x_netlink_exit(); + ne6x_proc_exit(); + ne6x_dbg_exit(); +} + +module_exit(ne6x_exit_module); diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c new file mode 100644 index 000000000000..1e6f21b53242 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_debugfs.h" +#include "ne6x_dev.h" +#include "ne6x_netlink.h" + +static struct sock *ne6x_nlsock; +static DEFINE_MUTEX(ne6x_msg_mutex); + +static int ne6x_netlink_tab_add(struct ne6x_pf *pf, struct ne6x_rule *rule) +{ + struct ne6x_debug_table *table_info; + struct device *dev; + u32 table_id = 0xFFFFFFFF; + int err; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + if (unlikely(!table_info)) + return -ENOMEM; + + dev = ne6x_pf_to_dev(pf); + table_info->table = NE6X_REG_ACL_TABLE; + table_info->size = NE6X_HASH_KEY_SIZE; + memcpy(table_info->data, rule, sizeof(*rule)); + + err = ne6x_reg_table_search(pf, table_info->table, &table_info->data[0], + table_info->size, NULL, table_info->size); + if (err == -ENOENT) { + table_info->size = NE6X_HASH_KEY_SIZE + NE6X_HASH_DATA_SIZE; + err = ne6x_reg_table_insert(pf, table_info->table, &table_info->data[0], + table_info->size, &table_id); + } else { + dev_info(dev, "table exists\n"); + kfree(table_info); + return -EEXIST; + } + + if (err == 0) { + dev_info(dev, "insert rule_id = 0x%x success!\n", table_id); + } else if (err != -ETIMEDOUT) { + dev_info(dev, "insert rule_id = 0x%x fail!\n", table_id); + err = -EIO; + } else { + dev_info(dev, "insert rule_id = 0x%x timeout!\n", table_id); + err = -EAGAIN; + } + + kfree(table_info); + return err; +} + +static int ne6x_netlink_tab_del(struct ne6x_pf *pf, struct ne6x_rule *rule) +{ + struct ne6x_debug_table *table_info; + struct device *dev; + int err; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + if (unlikely(!table_info)) + return -ENOMEM; + + dev = ne6x_pf_to_dev(pf); + table_info->table = NE6X_REG_ACL_TABLE; + table_info->size = NE6X_HASH_KEY_SIZE; + memcpy(table_info->data, rule, sizeof(*rule)); + + err = ne6x_reg_table_delete(pf, table_info->table, &table_info->data[0], table_info->size); + dev_info(dev, "%s: %s\n", __func__, (err == 0) ? "success!" : "timeout!"); + kfree(table_info); + + return err; +} + +static int ne6x_netlink_meter_write(struct ne6x_pf *pf, struct ne6x_meter *meter) +{ + struct meter_table vf_bw; + struct device *dev; + u32 cir_maxnum = 0xfffff; + u32 cbs_maxnum = 0xffffff; + u32 type_flag = 0; + u32 type_map = 0; + u32 cir; + int err; + + if (meter->type_num > NE6X_METER_TYPE_MAX || + meter->opcode > NE6X_METER_OPCODE_MAX) + return -EINVAL; + + dev = ne6x_pf_to_dev(pf); + type_flag |= BIT(meter->type_num); + + err = ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type_map); + if (err) + return err; + + if (meter->opcode) + type_map |= type_flag; + else + type_map &= ~type_flag; + + err = ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type_map); + if (err) + return err; + + cir = meter->value * 1000 + 1023; + cir = min(cir / 1024, cir_maxnum); + + vf_bw.cir = cir; + vf_bw.pir = min(cir + cir / 10, cir_maxnum); + + vf_bw.cbs = min(vf_bw.cir * 10000, cbs_maxnum); + vf_bw.pbs = min(vf_bw.pir * 10000, cbs_maxnum); + + err = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + meter->type_num, (u32 *)&vf_bw, sizeof(vf_bw)); + + dev_info(dev, "%s\n", err ? "write meter fail!"
: "write meter success!"); + + return err; +} + +static int ne6x_netlink_rcv_msg(struct nlmsghdr *nlh) +{ + char name[IFNAMSIZ] = {0}; + struct net_device *dev; + struct ne6x_pf *pf; + void *data; + int err; + + strncpy(name, nlmsg_data(nlh), IFNAMSIZ - 1); + dev = __dev_get_by_name(&init_net, name); + if (unlikely(!dev)) + return -ENODEV; + + if (unlikely(!netif_is_ne6x(dev))) + return -EOPNOTSUPP; + + pf = ne6x_netdev_to_pf(dev); + data = nlmsg_data(nlh) + IFNAMSIZ; + + switch (nlh->nlmsg_type) { + case NE6X_NLMSG_TAB_ADD: + /* if entry exists, treat it as insertion success */ + err = ne6x_netlink_tab_add(pf, data); + if (err == -EEXIST) + err = 0; + break; + case NE6X_NLMSG_TAB_DEL: + err = ne6x_netlink_tab_del(pf, data); + break; + case NE6X_NLMSG_METER_WRITE: + err = ne6x_netlink_meter_write(pf, data); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static void ne6x_netlink_ack(struct sk_buff *in_skb, unsigned long *status) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + size_t payload; + + payload = BITS_TO_LONGS(NE6X_RULE_BATCH_MAX) * sizeof(unsigned long); + skb_out = nlmsg_new(payload, GFP_KERNEL); + if (unlikely(!skb_out)) { + NETLINK_CB(in_skb).sk->sk_err = ENOBUFS; + NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk); + return; + } + + nlh = nlmsg_put(skb_out, NETLINK_CB(in_skb).portid, 0, NLMSG_DONE, payload, 0); + if (unlikely(!nlh)) { + nlmsg_free(skb_out); + return; + } + + NETLINK_CB(skb_out).dst_group = 0; + bitmap_copy(nlmsg_data(nlh), status, NE6X_RULE_BATCH_MAX); + + nlmsg_unicast(in_skb->sk, skb_out, NETLINK_CB(in_skb).portid); +} + +static void ne6x_netlink_rcv(struct sk_buff *skb) +{ + DECLARE_BITMAP(status, NE6X_RULE_BATCH_MAX); + u32 idx = 0; + + bitmap_zero(status, NE6X_RULE_BATCH_MAX); + mutex_lock(&ne6x_msg_mutex); + while (skb->len >= nlmsg_total_size(0) && idx < NE6X_RULE_BATCH_MAX) { + struct nlmsghdr *nlh; + int msglen, err; + + nlh = nlmsg_hdr(skb); + + if (unlikely(nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)) { + set_bit(idx, status); + goto skip; + } + + err = ne6x_netlink_rcv_msg(nlh); + if (err) + set_bit(idx, status); + +skip: + msglen = NLMSG_ALIGN(nlh->nlmsg_len); + if (unlikely(msglen > skb->len)) + msglen = skb->len; + + idx++; + skb_pull(skb, msglen); + } + + ne6x_netlink_ack(skb, status); + mutex_unlock(&ne6x_msg_mutex); +} + +/** + * ne6x_netlink_init - start up netlink resource for the driver + **/ +void ne6x_netlink_init(void) +{ + struct netlink_kernel_cfg ne6x_netlink_cfg = { + .input = ne6x_netlink_rcv, + }; + + ne6x_nlsock = netlink_kernel_create(&init_net, NE6X_NETLINK, &ne6x_netlink_cfg); + if (unlikely(!ne6x_nlsock)) + pr_warn("Init of netlink failed\n"); +} + +/** + * ne6x_netlink_exit - clean out the driver's netlink resource + **/ +void ne6x_netlink_exit(void) +{ + netlink_kernel_release(ne6x_nlsock); + ne6x_nlsock = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h new file mode 100644 index 000000000000..61a6cd1347bd --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_NETLINK_H +#define _NE6X_NETLINK_H + +#define NE6X_NETLINK 31 +#define NE6X_HASH_KEY_SIZE 64 +#define NE6X_HASH_DATA_SIZE 64 +#define NE6X_RULE_BATCH_MAX 64 +#define NE6X_METER_TYPE_MAX 8 +#define NE6X_METER_OPCODE_MAX 1 +#define NE6X_ADDR_LEN 16 + +/* netlink message opcodes */ +enum { + NE6X_NLMSG_BASE = 0x10, /* the type < 0x10 is reserved for control messages */ + NE6X_NLMSG_TAB_ADD = NE6X_NLMSG_BASE, + NE6X_NLMSG_TAB_DEL, + NE6X_NLMSG_METER_WRITE, + NE6X_NLMSG_MAX +}; + +struct ne6x_rule { + u8 dst[NE6X_ADDR_LEN]; + u8 src[NE6X_ADDR_LEN]; + u32 proto; +} __packed; + +struct ne6x_meter { + u8 type_num; + u8 opcode; + u32 value; +} __packed; + +void ne6x_netlink_init(void); +void ne6x_netlink_exit(void); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h new file mode 100644 index 000000000000..b60470095d99 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_PORTMAP_H +#define _NE6X_PORTMAP_H + +#include +#include + +#define PBMP_DWORD_NUM 4 +#define PBMP_WORD_WIDTH 32 + +typedef u32 pbmp_t[PBMP_DWORD_NUM]; + +#define SET_BIT(DAT, POS) ((DAT) |= ((u32)0x1 << (POS))) +#define CLR_BIT(DAT, POS) ((DAT) &= (~((u32)0x01 << (POS)))) + +#define PBMP_DWORD_GET(bm, word) ((bm)[(word)]) +#define PBMP_CLEAR(bm) \ + (PBMP_DWORD_GET(bm, 0) = PBMP_DWORD_GET(bm, 1) = \ + PBMP_DWORD_GET(bm, 2) = \ + PBMP_DWORD_GET(bm, 3) = 0) + +#define PBMP_WNET(port) ((port) / PBMP_WORD_WIDTH) +#define PBMP_WBIT(port) (1LU << ((port) % PBMP_WORD_WIDTH)) + +#define PBMP_ENTRY(bm, port) \ + (PBMP_DWORD_GET(bm, PBMP_WNET(port))) + +#define PBMP_PORT_REMOVE(bm, port) \ + (PBMP_ENTRY(bm, port) &= ~(PBMP_WBIT(port))) + +#define PBMP_PORT_ADD(bm, port) \ + (PBMP_ENTRY(bm, port) |= PBMP_WBIT(port)) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c new file mode 100644 index 000000000000..6015d51465c4 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" + +static struct proc_dir_entry *ne6x_proc_root; + +ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct ne6x_soc_temperature temp = {0}; + struct ne6x_soc_power power = {0}; + struct device *dev = NULL; + struct ne6x_pf *pf = NULL; + char *info = NULL; + ssize_t len = 0; + int err; + + if (*ppos > 0 || count < PAGE_SIZE) + return 0; + + info = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!info) + return -ENOMEM; + + pf = filp->private_data; + dev = &pf->pdev->dev; + err = ne6x_dev_get_temperature_info(pf, &temp); + if (err) { + dev_err(dev, "get device temperature failed\n"); + } else { + len += sprintf(info, "Chip temperature (°C) %d\n", temp.chip_temerature); + len += sprintf(info + len, "Nic temerature (°C) %d\n", temp.board_temperature); + } + + err = ne6x_dev_get_power_consum(pf, &power); + if (err) { + dev_err(dev, "get device power failed\n"); + } else { + len += sprintf(info + len, "Current (A) %d.%03d\n", + power.cur / 1000, power.cur % 1000); + len += sprintf(info + len, "Voltage (V) %d.%03d\n", + power.vol / 1000, power.vol % 1000); + len += sprintf(info + len, "Power (W) %d.%03d\n", + power.power / 1000, power.power % 1000); + } + + if (!len) { + kfree(info); + return len; + } + + if (copy_to_user(buf, info, len)) { + kfree(info); + return -EFAULT; + } + + *ppos = len; + kfree(info); + return len; +} + +ssize_t ne6x_proc_i2c_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct device *dev = NULL; + struct ne6x_pf *pf = NULL; + char info[512] = {0}; + ssize_t len = 0; + u32 id = 0; + int err; + + if (*ppos > 0 || count < 512) + return 0; + + pf = filp->private_data; + dev = &pf->pdev->dev; + err = ne6x_dev_i2c3_signal_test(pf, &id); + if (err) + dev_err(dev, "get device i2c external info failed\n"); + else + len += sprintf(info, "I2c external sig test %d\n", id & 0xff); + + if (!len) + return len; + + if (copy_to_user(buf, info, len)) + return -EFAULT; + + *ppos = len; + return len; +} + +static int ne6x_tps_open(struct inode *inode, struct file *file) +{ + file->private_data = pde_data(inode); + + return 0; +} + +static int ne6x_i2c_open(struct inode *inode, struct file *file) +{ + file->private_data = pde_data(inode); + + return 0; +} + +static const struct proc_ops ne6x_proc_tps_fops = { + .proc_open = ne6x_tps_open, + .proc_read = ne6x_proc_tps_read, +}; + +static const struct proc_ops ne6x_proc_i2c_fops = { + .proc_open = ne6x_i2c_open, + .proc_read = ne6x_proc_i2c_read, +}; + +void ne6x_proc_pf_init(struct ne6x_pf *pf) +{ + struct proc_dir_entry *pfile = NULL; + const struct device *dev = NULL; + const char *name = NULL; + + name = pci_name(pf->pdev); + dev = &pf->pdev->dev; + pf->ne6x_proc_pf = proc_mkdir(name, ne6x_proc_root); + if (!pf->ne6x_proc_pf) { + dev_err(dev, "proc dir %s create failed\n", name); + return; + } + + pfile = proc_create_data("temperature_power_state", 0600, pf->ne6x_proc_pf, + &ne6x_proc_tps_fops, pf); + if (!pfile) { + dev_err(dev, "proc file temperature_power_state create failed\n"); + goto create_failed; + } + + pfile = proc_create_data("i2c_test", 0600, pf->ne6x_proc_pf, &ne6x_proc_i2c_fops, pf); + if (!pfile) { + dev_err(dev, "proc file i2c_test create failed\n"); + goto create_failed; + } + + return; + +create_failed: + proc_remove(pf->ne6x_proc_pf); +} + +void ne6x_proc_pf_exit(struct ne6x_pf *pf) +{ + proc_remove(pf->ne6x_proc_pf); + pf->ne6x_proc_pf = NULL; +} + +extern char 
ne6x_driver_name[]; +void ne6x_proc_init(void) +{ + ne6x_proc_root = proc_mkdir(ne6x_driver_name, NULL); + if (!ne6x_proc_root) + pr_info("init of proc failed\n"); +} + +void ne6x_proc_exit(void) +{ + proc_remove(ne6x_proc_root); + ne6x_proc_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h new file mode 100644 index 000000000000..d4ce94cab66b --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_PROCFS_H +#define _NE6X_PROCFS_H + +struct ne6x_pf; + +void ne6x_proc_pf_init(struct ne6x_pf *pf); +void ne6x_proc_pf_exit(struct ne6x_pf *pf); +void ne6x_proc_init(void); +void ne6x_proc_exit(void); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c new file mode 100644 index 000000000000..2b7f6f24ca25 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c @@ -0,0 +1,1620 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" + +#define AXIA_MBUS_READ_MEMORY_COMMAND 0x07 +#define AXIA_MBUS_READ_MEMORY_ACK 0x08 + +#define AXIA_MBUS_WRITE_MEMORY_COMMAND 0x09 +#define AXIA_MBUS_WRITE_MEMORY_ACK 0x0A + +#define AXIA_MBUS_READ_REGISTER_COMMAND 0x0B +#define AXIA_MBUS_READ_REGISTER_ACK 0x0C + +#define AXIA_MBUS_WRITE_REGISTER_COMMAND 0x0D +#define AXIA_MBUS_WRITE_REGISTER_ACK 0x0E + +#define AXIA_MBUS_RESET_FIRMWARE_COMMAND 0x0F +#define AXIA_MBUS_RESET_FIRMWARE_ACK 0x10 +#define AXIA_MBUS_READ_TABLE_COMMAND 0x11 +#define AXIA_MBUS_READ_TABLE_ACK 0x12 + +#define AXIA_MBUS_WRITE_TABLE_COMMAND 0x13 +#define AXIA_MBUS_WRITE_TABLE_ACK 0x14 + +#define AXIA_MBUS_CLEARUP_COMMAND 0x15 +#define AXIA_MBUS_CLEARUP_ACK 0x16 + +/* hash table operator */ +#define AXIA_MBUS_INSERT_COMMAND 0x17 +#define AXIA_MBUS_INSERT_ACK 0x18 + +#define AXIA_MBUS_UPDATE_COMMAND 0x19 +#define AXIA_MBUS_UPDATE_ACK 0x1A + +#define AXIA_MBUS_DELETE_COMMAND 0x1B +#define AXIA_MBUS_DELETE_ACK 0x1C + +#define AXIA_MBUS_LOOKUP_COMMAND 0x1D +#define AXIA_MBUS_LOOKUP_ACK 0x1E + +/* data download operator */ +#define AXIA_MBUS_DOWNLOAD_COMMAND 0x21 +#define AXIA_MBUS_DOWNLOAD_ACK 0x22 + +#define AXIA_MBUS_OPERATOR_COMMAND 0x23 +#define AXIA_MBUS_OPERATOR_ACK 0x24 + +#define AXIA_MBUS_SETUP_PORT_COMMAND 0x25 +#define AXIA_MBUS_SETUP_PORT_ACK 0x26 + +#define AXIA_MBUS_SETUP_TABLE_COMMAND 0x27 +#define AXIA_MBUS_SETUP_TABLE_ACK 0x28 + +#define AXIA_MBUS_SETUP_TAPI_COMMAND 0x29 +#define AXIA_MBUS_SETUP_TAPI_ACK 0x2A + +#define AXIA_MBUS_SETUP_HASH_COMMAND 0x2B +#define AXIA_MBUS_SETUP_HASH_ACK 0x2C + +#define AXIA_MBUS_SETUP_DTAB_COMMAND 0x2D +#define AXIA_MBUS_SETUP_DTAB_ACK 0x2E + +#define AXIA_MBUS_E2PROM_READ_COMMAND 0x2F +#define AXIA_MBUS_E2PROM_READ_ACK 0x30 + +#define AXIA_MBUS_E2PROM_WRITE_COMMAND 0x31 +#define AXIA_MBUS_E2PROM_WRITE_ACK 0x32 + +#define AXIA_MBUS_SET_FAN_SPEED_COMMAND 0x33 +#define AXIA_MBUS_SET_FAN_SPEED_ACK 0x34 + +#define AXIA_MBUS_GET_FAN_SPEED_COMMAND 0x35 +#define AXIA_MBUS_GET_FAN_SPEED_ACK 0x36 + +#define AXIA_MBUS_GET_SYSTEM_INFO_COMMAND 0x37 +#define AXIA_MBUS_GET_SYSTEM_INFO_ACK 0x38 + +#define AXIA_MBUS_UPGRADE_PRE_COMMAND 0x39 +#define AXIA_MBUS_UPGRADE_PRE_COMMAND_ACK 0x3A +#define AXIA_MBUS_UPGRADE_COMMAND 0x3B +#define 
AXIA_MBUS_UPGRADE_COMMAND_ACK 0x3C + +#define AXIA_MBUS_GET_VER_COMMAND 0x3D +#define AXIA_MBUS_GET_VER_COMMAND_ACK 0x3E + +#define AXIA_MBUS_TALK_PORT_BASE 0x41 + +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 0) +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 2) +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 0) +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 2) +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 0) +#define AXIA_MBUS_TALK_SET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 2) +#define AXIA_MBUS_TALK_GET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 0) +#define AXIA_MBUS_TALK_SET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 1) + +#define AXIA_MBUS_TALK_GET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 2) +#define AXIA_MBUS_TALK_GET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 0) +#define AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 2) +#define 
AXIA_MBUS_TALK_GET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 1) + +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 2) +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 0) +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 2) +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 3) + +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 0) +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 1) + +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 2) +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 3) + +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 0) +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 1) + +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 2) +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 3) + +#define AXIA_MBUS_TALK_SET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 0) +#define AXIA_MBUS_TALK_SET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 1) + +#define AXIA_MBUS_TALK_GET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 2) +#define AXIA_MBUS_TALK_GET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 0) +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 2) +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 0) +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 2) +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 0) +#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SELF_TEST_COMMAND \ + 
(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 2) +#define AXIA_MBUS_TALK_GET_PORT_SELF_TEST_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 3) + +#define AXIA_MBUS_TALK_SET_PORT_STATE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 0) +#define AXIA_MBUS_TALK_SET_PORT_STATE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_STATE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 2) +#define AXIA_MBUS_TALK_GET_PORT_STATE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 3) + +#define AXIA_MBUS_SET_NIC_START_COMMAND 0x9F +#define AXIA_MBUS_SET_NIC_START_ACK 0xA0 +#define AXIA_MBUS_SET_NIC_STOP_COMMAND 0xA1 +#define AXIA_MBUS_SET_NIC_STOP_ACK 0xA2 +#define AXIA_MBUS_GET_NIC_STATE_COMMAND 0xA3 +#define AXIA_MBUS_GET_NIC_STATE_ACK 0xA4 +#define AXIA_MBUS_SET_NP_USERDATA_COMMAND 0xA5 +#define AXIA_MBUS_SET_NP_USERDATA_ACK 0xA6 +#define AXIA_MBUS_GET_NP_USERDATA_COMMAND 0xA7 +#define AXIA_MBUS_GET_NP_USERDATA_ACK 0xA8 + +#define AXIA_MBUS_SET_LED_STATE_COMMAND 0xA9 +#define AXIA_MBUS_SET_LED_STATE_ACK 0xAA + +#define AXIA_MBUS_CONFIG_METER_COMMAND 0xAB +#define AXIA_MBUS_CONFIG_METER_ACK 0xAC + +#define AXIA_MBUS_CLEAR_CREDIT_COMMAND 0xAD +#define AXIA_MBUS_CLEAR_CREDIT_ACK 0xAE + +#define AXIA_MBUS_SET_FAST_L2FDB_COMMAND 0xD1 +#define AXIA_MBUS_SET_FAST_L2FDB_ACK 0xD2 + +#define AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND 0xD3 +#define AXIA_MBUS_GET_DUMP_DATA_LEN_ACK 0xD4 + +#define AXIA_MBUS_GET_DUMP_DATA_COMMAND 0xD5 +#define AXIA_MBUS_GET_DUMP_DATA_ACK 0xD6 + +#define AXIA_MBUS_CLR_TABLE_COMMAND 0xD7 +#define AXIA_MBUS_CLR_TABLE_ACK 0xD8 + +#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND 0xD9 +#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_ACK 0xDA + +#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND 0xDB +#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_ACK 0xDC + +#define AXIA_MBUS_OPT_NOFLASH_COMMAND 0xDD +#define AXIA_MBUS_OPT_NOFLASH_ACK 0xDE + +#define PCIE2C810_SHM_MBUS_BASE 0x20878000 +#define PCIE2C810_SHM_DATA_BASE 0x20878004 + +#define MEM_ONCHIP_64BIT 0x00 +#define MEM_ONCHIP_512BIT 0x01 +#define MEM_ONXDDR_512BIT 0x04 + +enum engine_idx { + ENGINE_DIRECT_TABLE0 = 0x1, + ENGINE_DIRECT_TABLE1, + ENGINE_HASHA_TABLE, + ENGINE_HASHB_TABLE, +}; + +struct axia_mbus_msg { + union { + u32 uint; + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 opcode : 8; + u32 dst_block : 4; + u32 src_block : 4; + u32 data_len : 14; + u32 e : 2; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u32 e : 2; + u32 data_len : 14; + u32 src_block : 4; + u32 dst_block 
: 4; + u32 opcode : 8; +#endif + } bits; + } hdr; + u32 data[]; +} __packed; + +struct ne6x_diag_reg_test_info ne6x_reg_list[] = { + /* offset mask elements stride */ + {NE6X_VP_BASE_ADDR, 0xFFFFFFFFFFFFFFFF, NE6X_VP_INT, 0}, + {0} +}; + +struct ne6x_reg_table_info { + u32 addr; /* engine id as base address */ + u32 size; /* 00 - 15: length + * 16 - 20: + * 21 - 23: entry_num + * 24 - 26: mem_type + * 27 - 27: mem_type_bucekt + * 28 - 31: opcode + */ + u32 opcode_read; + u32 opcode_write; +#define ADV_CMD_DISABLE 0x00 +#define ADV_CMD_EBABLE 0x01 + u32 advanced_cmd; + u32 opcode_insert; + u32 opcode_delete; + u32 opcode_lookup; + u32 opcode_update; + u32 size_insert; + u32 size_delete; + u32 size_lookup; + u32 size_update; +}; + +static struct ne6x_reg_table_info table_info[] = { + /* address size(tableidx + memtype + bucket + entry_num + size) + * read write adv_cmd insert delete lookup size_insert size_delete size_lookup + */ + {0x00000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0200, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x10000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01, + AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND, + AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64}, + + {0x20000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0010, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x30000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0008, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x40000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (4 << 16) | 0x0100, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x50000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_512BIT << 24) | (1 << 21) | (1 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00, + 0x00, 0x00, 0x00, 0x00}, + + {0x60000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01, + AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND, + AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64}, + + {0x70000000, + (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040, + AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01, + AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND, + AXIA_MBUS_UPDATE_COMMAND, 96, 64, 64, 32}, +}; + +#define TABLE_ADDR(table) (table_info[table].addr & 0xF0000000) +#define TABLE_SIZE(table) (table_info[table].size & 0x00000FFF) +#define TABLE_XMEM(table) (table_info[table].size & 0xFFE00000) +#define TABLE_XNUM(table) ((table_info[table].size >> 16) & 0xF) + +#define TABLE_OPCODE_WRITE(table) (table_info[table].opcode_write & 0x3F) +#define TABLE_OPCODE_READ(table) (table_info[table].opcode_read & 0x3F) +#define TABLE_ADVCMD_VALID(table) (table_info[table].advanced_cmd == 0x01) +#define TABLE_OPCODE_INSERT(table) 
(table_info[table].opcode_insert & 0x3F) +#define TABLE_OPCODE_DELETE(table) (table_info[table].opcode_delete & 0x3F) +#define TABLE_OPCODE_LOOKUP(table) (table_info[table].opcode_lookup & 0x3F) + +#define TABLE_OPCODE_UPDATE(table) (table_info[table].opcode_update & 0x3F) + +#define TABLE_SIZE_INSERT(table) (table_info[table].size_insert) +#define TABLE_SIZE_DELETE(table) (table_info[table].size_delete) +#define TABLE_SIZE_LOOKUP(table) (table_info[table].size_lookup) +#define TABLE_SIZE_UPDATE(table) (table_info[table].size_update) +#define TABLE_SIZE_LOOKUP_RET(table) (table_info[table].size & 0xFFF) + +#define NUM_TABLE(table) (table_info[table].table_num) + +static u64 local_module_base; + +void ne6x_reg_lock(struct ne6x_pf *pf) +{ + mutex_lock(&pf->mbus_comm_mutex); +} + +void ne6x_reg_unlock(struct ne6x_pf *pf) +{ + mutex_unlock(&pf->mbus_comm_mutex); +} + +void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value) +{ + unsigned int reg_offset = 0; + void __iomem *addr = NULL; + + reg_offset = (base_addr << 12) + (offset_addr << 4); + addr = bar_base + reg_offset; + writeq(reg_value, addr); +} + +u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr) +{ + unsigned int reg_offset = 0; + void __iomem *addr = NULL; + u64 val = 0; + + reg_offset = (base_addr << 12) + (offset_addr << 4); + addr = bar_base + reg_offset; + val = readq(addr); + + return val; +} + +void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr, u64 reg_value) +{ + ne6x_switch_pci_write(pf->hw.hw_addr4, base_addr, offset_addr, reg_value); +} + +u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr) +{ + return ne6x_switch_pci_read(pf->hw.hw_addr4, base_addr, offset_addr); +} + +#define BAR4_CSR_OFFSET 0x3C0 +u32 ne6x_reg_axi_read(struct ne6x_pf *pf, u32 offset) +{ + u64 reg_offset = offset & 0xFFFFFFFC; + u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30); + + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); + reg_value = (reg_offset << 30); + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); + reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0); + reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0); + + return ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0) & 0xFFFFFFFFUL; +} + +void ne6x_reg_axi_write(struct ne6x_pf *pf, u32 offset, u32 value) +{ + u64 reg_offset = offset & 0xFFFFFFFC; + u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30) + value; + + reg_offset = (reg_offset << 30); + ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value); +} + +u32 _reg_apb_read(struct ne6x_pf *pf, u64 offset) +{ + u32 offset_l = 0x27A00000 | ((offset << 4) & 0xFFFF0); + u32 offset_h; + u32 data = 0; + + if ((offset & 0xFFFFF0000ULL) != local_module_base) { + offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0); + ne6x_reg_axi_write(pf, offset_h, 0xA1B2C3D4); + } + + data = ne6x_reg_axi_read(pf, offset_l); + + return data; +} + +void _reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value) +{ + u32 offset_l; + u32 offset_h; + + if ((offset & 0xFFFFF0000ULL) != local_module_base) { + offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0); + ne6x_reg_axi_write(pf, offset_h, 0xA2B2C3D4); + } + + offset_l = 0x2FA00000 | ((offset << 4) & 0xFFFF0); + ne6x_reg_axi_write(pf, offset_l, value); +} + +u32 NE6X_ACCESS_TIMEOUT = 9999; +int _ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen) +{ + struct axia_mbus_msg resp; + int timeout = 0, index = 0; + + memset(&resp, 0, sizeof(resp)); + + /* 
Write Command(s) */ + for (index = 0; index < len; index++) + _reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, data[index]); + + /* Start mbus mechanism, notice c810 */ + _reg_apb_write(pf, 0x20680014, 0x3FEC); + + usleep_range(200, 300); + + /* check if c810 handle completed */ + while (timeout < NE6X_ACCESS_TIMEOUT) { + resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE); + + /* resp opcode is even number, request opcode is odd number */ + if ((resp.hdr.bits.opcode & 0x01) == 0x0) + break; + + timeout++; + usleep_range(200, 220); + } + + if (timeout >= NE6X_ACCESS_TIMEOUT) { + dev_info(ne6x_pf_to_dev(pf), "%s: timeout! (%d)\n", __func__, timeout); + return -ETIMEDOUT; + } + + if (resp.hdr.bits.e == 1) { + dev_info(ne6x_pf_to_dev(pf), "%s: response.bits.e = 1 !\n", __func__); + return -EAGAIN; + } + + if (!pbuf) + return 0; + + for (index = 0; index < retlen; index++) + pbuf[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index); + + return 0; +} + +int ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen) +{ + int status; + + ne6x_reg_lock(pf); + status = _ne6x_reg_perform(pf, data, pbuf, len, retlen); + ne6x_reg_unlock(pf); + + return status; +} + +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset) +{ + u32 data; + + ne6x_reg_lock(pf); + data = _reg_apb_read(pf, offset); + ne6x_reg_unlock(pf); + + return data; +} + +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value) +{ + ne6x_reg_lock(pf); + _reg_apb_write(pf, offset, value); + ne6x_reg_unlock(pf); +} + +int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_READ_REGISTER_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = addr; + + status = ne6x_reg_perform(pf, (u32 *)msg, value, 2, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_WRITE_REGISTER_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = addr; + msg->data[1] = value; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +static bool ne6x_reg_valid_table(struct ne6x_pf *pf, enum ne6x_reg_table table) +{ + if (pf->hw_flag != 0) { + if (table > NE6X_REG_ARFS_TABLE) + return false; + } else { + if (table > NE6X_REG_VF_BW_TABLE) + return false; + } + + return true; +} + +int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (size % TABLE_SIZE(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_READ(table)); + msg->hdr.bits.data_len = 12; + msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table); + msg->data[1] = TABLE_XMEM(table) + size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)data, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table)) + return -EINVAL; + + if 
(!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_WRITE(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table); + msg->data[1] = TABLE_XMEM(table) + size; + memcpy(&msg->data[2], data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + size / 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *table_id) +{ + struct axia_mbus_msg *msg; + int status, count; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_INSERT(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + count = size / TABLE_SIZE_INSERT(table); + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_INSERT(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_INSERT(table); + memcpy((void *)&msg->data[2], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, table_id, 3 + (size >> 2), + (!table_id) ? 0 : count); + kfree(msg); + + return status; +} + +int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (TABLE_SIZE_DELETE(table) != size) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1028, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_DELETE(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + size; + memcpy(&msg->data[2], data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size >> 2), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *ret_data, int ret_size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_LOOKUP(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1036, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_LOOKUP(table)); + msg->hdr.bits.data_len = 12 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_LOOKUP_RET(table); + memcpy((void *)&msg->data[2], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, ret_data, 3 + (size >> 2), ret_size / 4); + kfree(msg); + + return (status != 0) ? 
-ENOENT : status; +} + +int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 index, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (TABLE_ADVCMD_VALID(table) == 0x0) + return -EINVAL; + + if (size % TABLE_SIZE_UPDATE(table) != 0x00) + return -EINVAL; + + if (!ne6x_reg_valid_table(pf, table)) + return -EINVAL; + + msg = kzalloc(1036, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_UPDATE(table)); + msg->hdr.bits.data_len = 16 + size; + msg->data[0] = TABLE_ADDR(table); + msg->data[1] = index; + msg->data[2] = TABLE_SIZE_UPDATE(table); + memcpy((void *)&msg->data[3], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (size >> 2), 0); + kfree(msg); + + return (status != 0) ? -ENOENT : status; +} + +int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk, + enum ne6x_reg_talk_opcode opcode, + int port, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + if (((size % 4) != 0) || size > 512) + return -EINVAL; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = (AXIA_MBUS_TALK_PORT_BASE + 4 * talk + 2 * opcode); + msg->hdr.bits.data_len = 8 + size; + msg->data[0] = port; + if (pbuf) + memcpy(&msg->data[1], pbuf, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, (opcode == NE6X_TALK_GET) ? pbuf : NULL, + 2 + ((opcode == NE6X_TALK_GET) ? 0 : (size >> 2)), + (opcode == NE6X_TALK_GET) ? (size >> 2) : 0); + kfree(msg); + + return status; +} + +int ne6x_reg_reset_firmware(struct ne6x_pf *pf) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_RESET_FIRMWARE_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 1, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 2048) + size = 2048; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_READ_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = offset; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 1024) + size = 1024; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_WRITE_COMMAND; + msg->hdr.bits.data_len = 12 + (size / 4) * 4; + msg->data[0] = (offset); + msg->data[1] = (size); + memcpy((void *)&msg->data[1], (void *)pbuf, (ssize_t)size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_FAN_SPEED_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)speed, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed) +{ + struct axia_mbus_msg *msg; + int status; + + msg = 
kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAN_SPEED_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = speed; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = class_type; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)ret, 3, size >> 2); + kfree(msg); + + return status; +} + +int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = 4; + msg->data[1] = port; + msg->data[2] = mode; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_mem_read(struct ne6x_pf *pf, u32 addr, void *pbuf, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + if (size > 1024) + size = 1024; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_READ_MEMORY_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = addr; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_mem_write(struct ne6x_pf *pf, u32 addr, void *pbuf, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 1024) + size = 1024; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_WRITE_MEMORY_COMMAND; + msg->hdr.bits.data_len = 12 + (size / 4) * 4; + msg->data[0] = addr; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size / 4), 0); + kfree(msg); + + return status; +} + +#define NE6X_FW_MAX_FRG_SIZE (4 * 1024) +int ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size) +{ + struct axia_mbus_msg *msg; + int offset = 0, left_size = 0, frag_size = 0; + int status = 0; + + msg = kzalloc(NE6X_FW_MAX_FRG_SIZE + 16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + ne6x_reg_lock(pf); + /* scile begin */ + NE6X_ACCESS_TIMEOUT = 100000; + left_size = size; + while (left_size) { + frag_size = (left_size >= NE6X_FW_MAX_FRG_SIZE) ? 
NE6X_FW_MAX_FRG_SIZE : left_size; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_UPGRADE_COMMAND; + msg->hdr.bits.data_len = 12 + frag_size; + msg->data[0] = region; /* region */ + msg->data[1] = frag_size; /* size */ + memcpy(&msg->data[2], data + offset, frag_size); + + status |= _ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (frag_size >> 2), 0); + if (status) + goto err_upgrade; + + left_size -= frag_size; + offset += frag_size; + } + +err_upgrade: + /* scile end */ + NE6X_ACCESS_TIMEOUT = 999; + ne6x_reg_unlock(pf); + kfree(msg); + + return status; +} + +int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version) +{ + struct axia_mbus_msg *msg; + u32 *out_buffer = (u32 *)version; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_VER_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, out_buffer, 1, + sizeof(struct ne6x_firmware_ver_info) / sizeof(u32)); + kfree(msg); + + return status; +} + +int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, u32 offset, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 2048) + size = 2048; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = port; + msg->data[1] = offset; + msg->data[2] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 4, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_START_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = flag; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_STOP_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = flag; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NIC_STATE_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)state, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 data) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NP_USERDATA_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = type; + msg->data[1] = data; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 *data) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NP_USERDATA_COMMAND; + 
msg->hdr.bits.data_len = 4; + msg->data[0] = type; + + status = ne6x_reg_perform(pf, (u32 *)msg, data, 2, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data) +{ + return ne6x_reg_set_user_data_template(pf, type, data); +} + +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data) +{ + int status = 0; + + status = ne6x_reg_get_user_data_template(pf, type, data); + + return status; +} + +int ne6x_reg_set_led(struct ne6x_pf *pf, int port, bool state) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_LED_STATE_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = port; + msg->data[1] = state; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_config_meter(struct ne6x_pf *pf, u32 meter_id, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_CONFIG_METER_COMMAND; + msg->hdr.bits.data_len = size + 8; + msg->data[0] = meter_id; + memcpy((void *)&msg->data[1], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index, u32 *data, + u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAST_L2FDB_COMMAND; + msg->hdr.bits.data_len = size + 8; + msg->data[0] = index; + memcpy((void *)&msg->data[1], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, size, 1, 1); + kfree(msg); + + return status; +} + +void ne6x_reg_send(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 size) +{ + struct axia_mbus_msg *msg; + u32 *msg_data; + int index; + + msg = kzalloc(size + 12, GFP_KERNEL); + if (!msg) + return; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = cmd; + msg->hdr.bits.data_len = 4 + size; + memcpy((void *)&msg->data[0], (void *)data, size); + + msg_data = (u32 *)msg; + /* Write Command(s) */ + for (index = 0; index < ((size / 4) + 1); index++) + _reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, msg_data[index]); + + /* Start mbus mechanism, notice c810 */ + _reg_apb_write(pf, 0x20680014, 0x3FEC); + usleep_range(1000, 1200); + kfree(msg); +} + +int ne6x_reg_polling(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 buf_size, + u32 *real_size) +{ + int timeout = 0, offset = 0; + struct axia_mbus_msg resp; + int index, status; + + memset(&resp, 0, sizeof(resp)); + + /* check if c810 handle completed */ + while (timeout < NE6X_ACCESS_TIMEOUT) { + resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE); + if (resp.hdr.bits.opcode == cmd) + break; + + timeout++; + usleep_range(200, 220); + } + + status = (timeout >= NE6X_ACCESS_TIMEOUT) ? -ETIMEDOUT : 0; + status = (resp.hdr.bits.e == 1) ? 
-EAGAIN : status; + if (status) { + dev_info(ne6x_pf_to_dev(pf), "%s: cmd %d status (%d)\n", __func__, cmd, status); + return status; + } + + switch (cmd) { + case AXIA_MBUS_GET_DUMP_DATA_ACK: + *real_size = resp.hdr.bits.data_len - sizeof(resp) - sizeof(u32); + offset = sizeof(u32); + pf->dump_info = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE); + break; + default: + *real_size = resp.hdr.bits.data_len - sizeof(resp); + offset = 0; + break; + } + + if (*real_size > buf_size) + *real_size = buf_size; + + for (index = 0; index < (*real_size) / 4; index++) + data[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index + offset); + + return 0; +} + +int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size) +{ + u32 *temp_buff = data; + u32 left_size = size; + u32 real_size = 0; + + memset(&pf->dump_info, 0, sizeof(u32)); + + ne6x_reg_lock(pf); + while (left_size > 0) { + temp_buff += real_size / 4; + ne6x_reg_send(pf, AXIA_MBUS_GET_DUMP_DATA_COMMAND, (u32 *)&pf->dump_info, 4); + if (ne6x_reg_polling(pf, AXIA_MBUS_GET_DUMP_DATA_ACK, + temp_buff, left_size, &real_size)) { + ne6x_reg_unlock(pf); + return -EAGAIN; + } + + left_size -= real_size; + } + ne6x_reg_unlock(pf); + + return 0; +} + +int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id) +{ + struct axia_mbus_msg *msg; + int status; + + if (!ne6x_reg_valid_table(pf, table_id)) + return -EINVAL; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + NE6X_ACCESS_TIMEOUT = 99999; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_CLR_TABLE_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = table_id; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + NE6X_ACCESS_TIMEOUT = 9999; + + return status; +} + +int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = write_protect; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, p_write_protect, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16 + length; + msg->data[0] = NE6X_NORFLASH_OP_WRITE_E; + msg->data[1] = offset; + msg->data[2] = length; + memcpy((void *)&msg->data[3], (void *)pdata, length); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (length >> 2), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = NE6X_NORFLASH_OP_ERASE_E; + msg->data[1] = 
offset; + msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = NE6X_NORFLASH_OP_READ_E; + msg->data[1] = offset; + msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, p, 4, length >> 2); + kfree(msg); + + return status; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h new file mode 100644 index 000000000000..cf8a7c5767a1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_REG_H +#define _NE6X_REG_H + +#include + +struct ne6x_diag_reg_test_info { + u32 offset; /* the base register */ + u64 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +enum ne6x_reg_table { + NE6X_REG_RSS_TABLE = 0x0, + NE6X_REG_L2FDB_TABLE, + NE6X_REG_VLAN_TABLE, + NE6X_REG_MAC_LEARN_TABLE, + NE6X_REG_VF_STAT_TABLE, + NE6X_REG_VF_BW_TABLE, + NE6X_REG_ACL_TABLE, + NE6X_REG_ARFS_TABLE, + NE6X_REG_TABLE_LAST, +}; + +enum ne6x_reg_talk_port { + NE6X_MSG_PORT_ENABLE = 0, + NE6X_MSG_PORT_DUPLEX, + NE6X_MSG_PORT_SPEED, + NE6X_MSG_PORT_STATS, + NE6X_MSG_PORT_SFP_SPEED, + NE6X_MSG_PORT_FEC, + NE6X_MSG_PORT_SPEED_MAX, + NE6X_MSG_PORT_PAUSE, + NE6X_MSG_PORT_PAUSE_ADDR, + NE6X_MSG_PORT_LOOPBACK, + NE6X_MSG_PORT_MAX_FRAME, + NE6X_MSG_PORT_AUTO_NEG, + NE6X_MSG_PORT_INFO, + NE6X_MSG_PORT_LINK_STATUS, + NE6X_MSG_PORT_DRV_I2C, + NE6X_MSG_PORT_SELF_TEST, + NE6X_MSG_PORT_SFP_TYPE_LEN, + NE6X_MSG_PORT_SFP_EEPROM, + NE6X_MSG_PORT_STATE, +}; + +enum ne6x_reg_talk_opcode { + NE6X_TALK_SET = 0, + NE6X_TALK_GET +}; + +extern struct ne6x_diag_reg_test_info ne6x_reg_list[]; + +struct table_info { + u32 addr; /* 00 - 27: max_size + * 28 - 31: engine_idx + */ + u32 size; + /* 00 - 15: length + * 16 - 20: + * 21 - 23: entry_num + * 24 - 26: mem_type + * 27 - 27: mem_type_bucekt + * 28 - 31: opcode + */ + u16 opcode_read; + u16 opcode_write; +#define ADV_CMD_DISABLE 0x00 +#define ADV_CMD_EBABLE 0x01 + u32 advanced_cmd; + u16 opcode_insert; + u16 opcode_delete; + u16 opcode_update; + u16 opcode_search; + u16 size_insert; + u16 size_delete; + u16 size_search; + u16 size_update; +}; + +struct rss_table { + u32 resv; + u32 flag; + u32 hash_fun; /* 24-31, func, 23-1,type */ + u32 queue_base; + u16 queue_def; + u16 queue_size; + u16 entry_num; + u16 entry_size; + u8 entry_data[128]; + u8 hash_key[352]; + u8 resv1[8]; +}; + +struct l2fdb_dest_unicast { + u8 flags; /* bit0 -- static,bit1---multicast */ + u8 rsv[3]; + u32 vp_bmp[3]; + u32 cnt; /* leaf num */ + u8 resv3[44]; +}; + +struct l2fdb_dest_multicast { + u8 flags; /* bit0 -- static,bit1---multicast */ + u8 resv3[3]; + u32 vp_bmp[3]; + u8 resv4[48]; +}; + +struct l2fdb_search_result { + u32 key_index; + union { + struct l2fdb_dest_unicast unicast; + struct l2fdb_dest_multicast multicast; + } fw_info; +}; + +struct l2fdb_table { + u8 resv1; + u8 pport; + u8 mac[6]; + u32 vlanid; + u8 resv2[52]; + union { + struct l2fdb_dest_unicast unicast; + struct l2fdb_dest_multicast multicast; + } fw_info; /* 
forward info */ +}; + +struct l2fdb_fast_table { + u8 mac[6]; + u8 start_cos; + u8 cos_num; +}; + +struct meter_table { + u32 cir; + u32 cbs; + u32 pir; + u32 pbs; +}; + +enum np_user_data { + NP_USER_DATA_HW_FEATURES = 0, + NP_USER_DATA_HW_FLAGS = 1, + NP_USER_DATA_RSS_TABLE_SIZE = 2, + NP_USER_DATA_RSS_TABLE_ENTRY_WIDTH = 3, + NP_USER_DATA_RSS_HASH_KEY_BLOCK_SIZE = 4, + NP_USER_DATA_PORT2PI_0 = 5, + NP_USER_DATA_PI2PORT_0 = 25, + NP_USER_DATA_VLAN_TYPE = 33, + NP_USER_DATA_RSV_0 = 34, + NP_USER_DATA_RSV_1 = 35, + NP_USER_DATA_RSV_2 = 36, + NP_USER_DATA_PI0_BROADCAST_LEAF = 37, + NP_USER_DATA_PORT_OLFLAGS_0 = 53, + NP_USER_DATA_PORT_2_COS_0 = 121, + NP_USER_DATA_VPORT0_LINK_STATUS = 155, + NP_USER_DATA_TSO_CKSUM_DISABLE = 156, + NP_USER_DATA_PORT0_MTU = 157, + NP_USER_DATA_PORT0_QINQ = 161, + NP_USER_DATA_CQ_SIZE = 229, + NP_USER_DATA_FAST_MODE = 230, + NP_USER_DATA_SUB_FLAG = 231, + NP_USER_DATA_DDOS_FLAG = 242, + NP_USER_DATA_END = 255, +}; + +struct ne6x_diag_reg_info { + u32 address; + u32 value; +}; + +enum { + NE6X_NORFLASH_OP_WRITE_E = 0, + NE6X_NORFLASH_OP_READ_E = 1, + NE6X_NORFLASH_OP_ERASE_E = 2, + NE6X_NORFLASH_OP_E_END, +}; + +void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, + u32 offset_addr, u64 reg_value); +u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr); + +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset); +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value); +int ne6x_reg_reset_firmware(struct ne6x_pf *pf); +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset); +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value); + +int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value); +int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value); +int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *table_id); +int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size); +int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *ret_data, int ret_size); + +int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed); +int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed); + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size); +int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk, + enum ne6x_reg_talk_opcode opcode, int port, + void *pbuf, int size); +int ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size); + +int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version); + +int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, + u32 offset, int size); + +int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag); + +int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state); + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data); +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data); + +int ne6x_reg_set_led(struct ne6x_pf *pf, int port, bool state); +int 
+
+int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode);
+
+int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index,
+				      u32 *data, u32 size);
+int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size);
+int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size);
+int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id);
+
+int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect);
+int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect);
+
+int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata);
+int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length);
+int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p);
+
+#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_trace.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_trace.h
new file mode 100644
index 000000000000..892e38d19059
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_trace.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */
+
+#ifndef CONFIG_TRACEPOINTS
+#if !defined(_NE6X_TRACE_H_)
+#define _NE6X_TRACE_H_
+
+#define ne6x_trace(trace_name, args...)
+#define ne6x_trace_enabled(trace_name) (0)
+#endif /* !defined(_NE6X_TRACE_H_) */
+#else /* CONFIG_TRACEPOINTS */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ne6x
+
+#if !defined(_NE6X_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _NE6X_TRACE_H_
+
+#include <linux/tracepoint.h>
+#include "trace_comm.h"
+#endif /* _NE6X_TRACE_H_ */
+/* This must be outside ifdef _NE6X_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE ne6x_trace
+#include <trace/define_trace.h>
+#endif /* CONFIG_TRACEPOINTS */
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c
new file mode 100644
index 000000000000..a60f6b1d3721
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd.
*/ + +#include "ne6x.h" +#include "ne6x_txrx.h" +#include "ne6x_reg.h" +#include "ne6x_trace.h" + +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) { + err = ne6x_setup_tx_descriptors(adpt->tx_rings[i]); + err = ne6x_setup_tg_descriptors(adpt->tg_rings[i]); + err = ne6x_setup_cq_descriptors(adpt->cq_rings[i]); + err = ne6x_setup_tx_sgl(adpt->tx_rings[i]); + } + + return err; +} + +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_setup_rx_descriptors(adpt->rx_rings[i]); + + return err; +} + +static inline void ne6x_update_enable_itr(struct ne6x_q_vector *q_vector) +{ + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + u64 val = 1ULL << NE6X_VP_CQ_INTSHIFT; + + if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + struct ne6x_ring *cq_ring = NULL; + + cq_ring = q_vector->cq.ring; + if (cq_ring->next_to_clean != cq_ring->next_to_use) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) { + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT), val); + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), ~(val)); + } else { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT), val); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), ~(val)); + } + } +} + +int ne6x_napi_poll(struct napi_struct *napi, int budget) +{ + struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi); + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt; + struct ne6x_ring *ring = NULL; + bool clean_complete = true; + int cq_budget = 16; + int work_done = 0; + int cleaned = 0; + + if (test_bit(NE6X_ADPT_DOWN, comm->state)) { + napi_complete(napi); + return 0; + } + + ring = q_vector->cq.ring; + cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget); + if (cleaned >= cq_budget) + clean_complete = false; + + ring = q_vector->tx.ring; + if (!ne6x_clean_tx_irq(comm, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + + ring = q_vector->rx.ring; + cleaned = ne6x_clean_rx_irq(ring, budget); + if (cleaned >= budget) + clean_complete = false; + + work_done += cleaned; + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. 
+ */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } +tx_only: + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + + return min(work_done, budget - 1); +} + +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings && adpt->tx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + kfree_rcu(adpt->tx_rings[i], rcu); + adpt->tx_rings[i] = NULL; + adpt->rx_rings[i] = NULL; + adpt->cq_rings[i] = NULL; + } + } +} + +int ne6x_alloc_rings(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *ring; + int i, qpv = 4; + + /* Set basic values in the rings to be used later during open() */ + for (i = 0; i < adpt->num_queue; i++) { + /* allocate space for both Tx and Rx in one shot */ + ring = kcalloc(qpv, sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_out; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_tx_desc; + ring->size = 0; + adpt->tx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_cq_desc; + ring->size = 0; + adpt->cq_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_rx_desc; + ring->size = 0; + adpt->rx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_tg_desc; + ring->size = 0; + adpt->tg_rings[i] = ring; + } + + return 0; + +err_out: + ne6x_adpt_clear_rings(adpt); + return -ENOMEM; +} + +static int ne6x_configure_tx_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_sq_base_addr sq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_sq_cfg sq_cfg; + + /* SRIOV mode VF Config OR SRIOV disabled PF Config */ + if (pf_q < NE6X_PF_VP0_NUM) { + sq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR), sq_base_addr.val); + + sq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG), sq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV mode PF Config */ + sq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR), + sq_base_addr.val); + + sq_cfg.val = + 
rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_CFG), sq_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_tx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + for (i = 0; (i < adpt->num_queue) && !err; i++) + err = ne6x_configure_tx_ring(adpt->tx_rings[i]); + + return err; +} + +static int ne6x_configure_cq_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_cq_base_addr cq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_cq_cfg cq_cfg; + + /* SRIOV enabled VF config OR SRIOV disabled PF config */ + if (pf_q < NE6X_PF_VP0_NUM) { + cq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR), cq_base_addr.val); + + cq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG), cq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_TAIL_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (void __iomem *)hw->hw_addr0 + + (NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_HD_POINTER)); + writeq(0, ring->tail); + } else { + /* SRIOV enable PF config */ + cq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_BASE_ADDR), + cq_base_addr.val); + + cq_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_CFG), + cq_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_CQ_TAIL_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (void __iomem *)hw->hw_addr4 + + (NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_HD_POINTER)); + writeq(0, ring->tail); + } + + return 0; +} + +int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + /* set up individual rings */ + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_configure_cq_ring(adpt->cq_rings[i]); + + return 0; +} + +static int ne6x_configure_rx_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_rq_block_cfg rq_block_cfg; + union ne6x_rq_base_addr rq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_rq_cfg rc_cfg; + u16 rxmax = 0; + + ring->rx_buf_len = adpt->rx_buf_len; + + if (pf_q < NE6X_PF_VP0_NUM) { + rq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR)); + 
rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR), rq_base_addr.val); + + rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len); + rq_block_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = rxmax; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val); + + rc_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = ring->count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG), rc_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_HD_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV enabled PF Config */ + rq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_BASE_ADDR), + rq_base_addr.val); + + rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len); + rq_block_cfg.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = rxmax; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG), + rq_block_cfg.val); + + rc_cfg.val = + rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = ring->count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_CFG), rc_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_HD_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + adpt->max_frame = NE6X_MAX_RXBUFFER; + adpt->rx_buf_len = (PAGE_SIZE < 8192) ? 
NE6X_RXBUFFER_4096 : NE6X_RXBUFFER_4096; + + /* set up individual rings */ + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_configure_rx_ring(adpt->rx_rings[i]); + + return err; +} + +netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_ring *tx_ring = adpt->tx_rings[skb->queue_mapping]; + struct ne6x_ring *tag_ring = adpt->tg_rings[skb->queue_mapping]; + struct sk_buff *trailer; + int tailen = 4; + int nsg; + bool jumbo_frame = true; + + /* hardware can't handle really short frames, hardware padding works + * beyond this point + */ + if (skb_put_padto(skb, NE6X_MIN_TX_LEN)) + return NETDEV_TX_OK; + + /* single packet add 4 byte to CRC */ + if (skb->len < NE6X_MAX_DATA_PER_TXD) { + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + netdev_err(adpt->netdev, "TX: skb_cow_data() returned %d\n", nsg); + return nsg; + } + + pskb_put(skb, trailer, tailen); + jumbo_frame = false; + } + + if (netdev->gso_max_size < skb->len) + netdev_err(adpt->netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len); + + return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h new file mode 100644 index 000000000000..b09563cfc4e3 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_TXRX_H +#define _NE6X_TXRX_H + +int ne6x_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c new file mode 100644 index 000000000000..e147e01efd6d --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c @@ -0,0 +1,2388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_interrupt.h" + +void ne6x_clear_vf_status(struct ne6x_vf *vf) +{ + struct ne6x_flowctrl flowctrl; + + flowctrl.rx_pause = 0; + flowctrl.tx_pause = 0; + ne6x_dev_set_flowctrl(vf->adpt, &flowctrl); + ne6x_dev_set_vf_bw(vf->adpt, 0); +} + +void ne6x_mbx_deinit_snapshot(struct ne6x_hw *hw) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Free VF counter array and reset vf counter length */ + kfree(snap->mbx_vf.vf_cntr); + snap->mbx_vf.vfcntr_len = 0; +} + +int ne6x_mbx_init_snapshot(struct ne6x_hw *hw, u16 vf_count) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Ensure that the number of VFs allocated is non-zero and + * is not greater than the number of supported VFs defined in + * the functional capabilities of the PF. + */ + if (!vf_count || vf_count > NE6X_MAX_VP_NUM) + return 1; + + snap->mbx_vf.vf_cntr = kcalloc(vf_count, sizeof(*snap->mbx_vf.vf_cntr), GFP_KERNEL); + if (!snap->mbx_vf.vf_cntr) + return 1; + + /* Setting the VF counter length to the number of allocated + * VFs for given PF's functional capabilities. 
+ */ + snap->mbx_vf.vfcntr_len = vf_count; + snap->state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + memset(hw->ne6x_mbx_ready_to_send, true, 64); + + return 0; +} + +int ne6x_status_to_errno(int err) +{ + if (err) + return -EINVAL; + + return 0; +} + +void ne6x_set_vf_state_qs_dis(struct ne6x_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + clear_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states); +} + +static void ne6x_dis_vf_qs(struct ne6x_vf *vf) +{ + ne6x_set_vf_state_qs_dis(vf); +} + +bool ne6x_is_reset_in_progress(unsigned long *state) +{ + return test_bit(NE6X_PF_RESET_REQUESTED, state) || + test_bit(NE6X_RESET_INTR_RECEIVED, state) || + test_bit(NE6X_CORE_RESET_REQUESTED, state) || + test_bit(NE6X_GLOBAL_RESET_REQUESTED, state); +} + +void ne6x_adpt_close_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); +} + +static int ne6x_adpt_clear_vf(struct ne6x_adapter *adpt) +{ + struct mac_addr_head *mc_head = &adpt->mc_mac_addr; + struct mac_addr_head *uc_head = &adpt->uc_mac_addr; + struct mac_addr_node *temp_node, *addr_node; + struct ne6x_vlan_filter *vlf, *vlftmp; + struct ne6x_pf *pf; + + if (!adpt) + return 0; + + if (!adpt->back) + goto free_adpt; + + pf = adpt->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->adpt[adpt->idx]) { + dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n", + adpt->idx, adpt->idx, adpt->type); + goto unlock_adpt; + } + + if (pf->adpt[adpt->idx] != adpt) { + dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n", + pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type); + goto unlock_adpt; + } + + pf->adpt[adpt->idx] = NULL; + if (adpt->idx < pf->next_adpt) + pf->next_adpt = adpt->idx; + + kfree(adpt->tx_rings); + adpt->tx_rings = NULL; + + kfree(adpt->q_vectors); + adpt->q_vectors = NULL; + + kfree(adpt->port_info); + adpt->port_info = NULL; + + /* release adpt multicast addr list resource */ + mutex_lock(&mc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&mc_head->mutex); + + /* release adpt unicast addr list resource */ + mutex_lock(&uc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&uc_head->mutex); + + spin_lock_bh(&adpt->mac_vlan_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + +unlock_adpt: + mutex_unlock(&pf->switch_mutex); +free_adpt: + kfree(adpt); + + return 0; +} + +int ne6x_adpt_release_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + struct ne6x_pf *pf; + + if (!adpt->back) + return -ENODEV; + + pf = adpt->back; + + if (adpt->netdev && !ne6x_is_reset_in_progress(pf->state) && + (test_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state))) { + unregister_netdev(adpt->netdev); + clear_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state); + } + + ne6x_adpt_close_vf(adpt, vf_id); + + if (!ne6x_is_reset_in_progress(pf->state)) + ne6x_adpt_clear_vf(adpt); + + return 0; +} + +struct ne6x_adapter *ne6x_get_vf_adpt(struct ne6x_vf *vf) +{ + return vf->pf->adpt[vf->lan_adpt_idx]; +} + +static void ne6x_vf_invalidate_adpt(struct ne6x_vf *vf) +{ + vf->lan_adpt_idx = 
NE6X_NO_ADPT; +} + +static void ne6x_vf_adpt_release(struct ne6x_vf *vf) +{ + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + ne6x_adpt_release_vf(ne6x_get_vf_adpt(vf), vf->vf_id); + ne6x_vf_invalidate_adpt(vf); +} + +static void ne6x_free_vf_res(struct ne6x_vf *vf) +{ + /* First, disable VF's configuration API to prevent OS from + * accessing the VF's adapter after it's freed or invalidated. + */ + clear_bit(NE6X_VF_STATE_INIT, vf->vf_states); + + /* free adapter and disconnect it from the parent uplink */ + if (vf->lan_adpt_idx != NE6X_NO_ADPT) { + if (vf->tx_rate) { + ne6x_dev_set_vf_bw(ne6x_get_vf_adpt(vf), 0); + vf->tx_rate = 0; + } + + ne6x_vf_adpt_release(vf); + } +} + +static int ne6x_sriov_free_msix_res(struct ne6x_pf *pf) +{ + struct ne6x_lump_tracking *res; + + if (!pf) + return -EINVAL; + + res = pf->irq_pile; + if (!res) + return -EINVAL; + + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +void ne6x_free_vfs(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + unsigned int tmp, i; + u64 reg; + + if (!pf->vf) + return; + + while (test_and_set_bit(NE6X_VF_DIS, pf->state)) + usleep_range(1000, 2000); + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); + + /* Avoid wait time by stopping all VFs at the same time */ + ne6x_for_each_vf(pf, i) { + if (test_bit(NE6X_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ne6x_dis_vf_qs(&pf->vf[i]); + } + + tmp = pf->num_alloc_vfs; + pf->num_qps_per_vf = 0; + pf->num_alloc_vfs = 0; + + for (i = 0; i < tmp; i++) { + if (test_bit(NE6X_VF_STATE_INIT, pf->vf[i].vf_states)) { + set_bit(NE6X_VF_STATE_DIS, pf->vf[i].vf_states); + ne6x_free_vf_res(&pf->vf[i]); + } + } + + if (ne6x_sriov_free_msix_res(pf)) + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); + + ne6x_dev_clear_vport(pf); + kfree(pf->vf); + pf->vf = NULL; + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x8000; + wr64_bar4(&pf->hw, 0x05300, reg); + + clear_bit(NE6X_VF_DIS, pf->state); +} + +static int ne6x_alloc_vfs(struct ne6x_pf *pf, int num_vfs) +{ + struct ne6x_vf *vfs; + + vfs = kcalloc(num_vfs, sizeof(*vfs), GFP_KERNEL); + if (!vfs) + return -ENOMEM; + + pf->vf = vfs; + pf->num_alloc_vfs = num_vfs; + + return 0; +} + +static int ne6x_sriov_set_msix_res(struct ne6x_pf *pf, u16 num_msix_needed) +{ + int sriov_base_vector; + + sriov_base_vector = NE6X_MAX_MSIX_NUM - num_msix_needed; + + /* make sure we only grab irq_tracker entries from the list end and + * that we have enough available MSIX vectors + */ + if (sriov_base_vector < 0) + return -EINVAL; + + return 0; +} + +static int ne6x_set_per_vf_res(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + u16 queue; + + if (!pf->num_alloc_vfs) + return -EINVAL; + + queue = NE6X_MAX_VP_NUM / pf->num_alloc_vfs; + + if (ne6x_sriov_set_msix_res(pf, queue * pf->num_alloc_vfs)) { + dev_err(dev, "Unable 
to set MSI-X resources for %d VFs\n", pf->num_alloc_vfs); + return -EINVAL; + } + + /* only allow equal Tx/Rx queue count (i.e. queue pairs) */ + pf->num_qps_per_vf = queue; + dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", pf->num_alloc_vfs, + pf->num_qps_per_vf, pf->num_qps_per_vf); + + return 0; +} + +static void ne6x_vc_clear_allowlist(struct ne6x_vf *vf) +{ + bitmap_zero(vf->opcodes_allowlist, VIRTCHNL_OP_MAX); +} + +/* default opcodes to communicate with VF */ +static const u32 default_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_OP_VERSION, + VIRTCHNL_OP_RESET_VF, +}; + +static void ne6x_vc_allowlist_opcodes(struct ne6x_vf *vf, const u32 *opcodes, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + set_bit(opcodes[i], vf->opcodes_allowlist); +} + +void ne6x_vc_set_default_allowlist(struct ne6x_vf *vf) +{ + ne6x_vc_clear_allowlist(vf); + ne6x_vc_allowlist_opcodes(vf, default_allowlist_opcodes, + ARRAY_SIZE(default_allowlist_opcodes)); +} + +static void ne6x_set_dflt_settings_vfs(struct ne6x_pf *pf) +{ + int i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + vf->pf = pf; + vf->vf_id = i; + vf->base_queue = (NE6X_MAX_VP_NUM / pf->num_alloc_vfs) * i; + vf->num_vf_qs = pf->num_qps_per_vf; + vf->tx_rate = 0; + test_and_clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vc_set_default_allowlist(vf); + } +} + +void ne6x_send_init_mbx_mesg(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + u64 reg_cfg; + int i; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), 0x0); + reg_cfg = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK)); + reg_cfg &= ~(1ULL << vf->base_queue); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), reg_cfg); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), reg_cfg); + } +} + +static struct ne6x_port_info *ne6x_vf_get_port_info(struct ne6x_vf *vf) +{ + struct ne6x_adapter *adpt = ne6x_get_vf_adpt(vf); + + return adpt->port_info; +} + +static struct ne6x_adapter *ne6x_adpt_alloc(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt = NULL; + int pf_adpt_idx; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* If we have already allocated our maximum number of adapters, + * pf->next_adpt will be NE6X_NO_ADPT. 
If not, pf->next_adpt index + * is available to be populated + */ + if (pf->next_adpt == NE6X_NO_ADPT) { + dev_dbg(dev, "out of adapter slots!\n"); + goto unlock_pf; + } + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + adpt->back = pf; + adpt->type = NE6X_ADPT_VF; + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + adpt->num_queue = pf->vf[vf_id].num_vf_qs; + adpt->num_q_vectors = pf->vf[vf_id].num_vf_qs; + /* vf_id 0 -- 63: vport: 0 -- 64: pf: 64 -- 68 */ + adpt->idx = pf->vf[vf_id].vf_id + pf->num_alloc_adpt; + adpt->vport = pf->vf[vf_id].vf_id; + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) + goto err_rings; + + /* vf attach pf alloc */ + pf_adpt_idx = pf->vf[vf_id].base_queue / (NE6X_MAX_VP_NUM / pf->hw.pf_port); + adpt->port_info->lport = pf->adpt[pf_adpt_idx]->port_info->lport; + adpt->port_info->hw_port_id = pf->adpt[pf_adpt_idx]->port_info->hw_port_id; + adpt->port_info->hw = &pf->hw; + adpt->port_info->hw_trunk_id = pf->adpt[pf_adpt_idx]->port_info->hw_trunk_id; + adpt->port_info->hw_queue_base = pf->vf[vf_id].base_queue; + adpt->port_info->hw_max_queue = pf->vf[vf_id].num_vf_qs; + adpt->base_queue = pf->vf[vf_id].base_queue; + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + pf->adpt[adpt->idx] = adpt; + + goto unlock_pf; + +err_rings: + kfree(adpt); + adpt = NULL; +unlock_pf: + mutex_unlock(&pf->switch_mutex); + return adpt; +} + +struct ne6x_adapter *ne6x_adpt_setup_vf(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_alloc(pf, vf_id, num_vfs); + if (!adpt) { + dev_err(dev, "could not allocate adapter\n"); + return NULL; + } + + return adpt; +} + +static struct ne6x_adapter *ne6x_vf_adpt_setup(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_setup_vf(pf, vf->vf_id, num_vfs); + if (!adpt) { + dev_err(ne6x_pf_to_dev(pf), "Failed to create VF adapter\n"); + ne6x_vf_invalidate_adpt(vf); + return NULL; + } + + vf->lan_adpt_idx = adpt->idx; + vf->adpt = adpt; + + return adpt; +} + +static int ne6x_init_vf_adpt_res(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_pf *pf = vf->pf; + u8 broadcast[ETH_ALEN]; + struct ne6x_adapter *adpt; + struct device *dev; + + dev = ne6x_pf_to_dev(pf); + adpt = ne6x_vf_adpt_setup(vf, num_vfs); + if (!adpt) + return -ENOMEM; + + vf->tx_rate = 0; + ne6x_dev_set_vf_bw(adpt, vf->tx_rate); + eth_broadcast_addr(broadcast); + + return 0; +} + +static int ne6x_start_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + int retval, i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + retval = ne6x_init_vf_adpt_res(vf, num_vfs); + if (retval) { + dev_err(ne6x_pf_to_dev(pf), "Failed to initialize adapter resources for VF %d, error %d\n", + vf->vf_id, retval); + goto teardown; + } + + set_bit(NE6X_VF_STATE_INIT, vf->vf_states); + } + + ne6x_linkscan_schedule(pf); + + return 0; + +teardown: + for (i = i - 1; i >= 0; i--) { + struct ne6x_vf *vf = &pf->vf[i]; + + ne6x_vf_adpt_release(vf); + } + + return retval; +} + +static int ne6x_delete_pf_trunk(struct ne6x_pf *pf) +{ + return 0; +} + +static int ne6x_recycle_vp_resources(struct 
ne6x_pf *pf) +{ + struct ne6x_adapter *adpt; + int rst, i; + u64 reg; + + rst = ne6x_delete_pf_trunk(pf); + if (rst) + return rst; + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_close(adpt); + } + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x7c000; + wr64_bar4(&pf->hw, 0x05300, reg); + + return 0; +} + +static int ne6x_adpt_resetup(struct ne6x_pf *pf, bool recovery) +{ + int vid, pooling, i, actual_vector = 1, size; + struct device *dev = ne6x_pf_to_dev(pf); + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + int qp_remaining, q_vectors; + struct ne6x_adapter *adpt = NULL; + u64 __iomem *reg; + + pooling = test_bit(NE6X_LINK_POOLING, pf->state); + if (pooling) + clear_bit(NE6X_LINK_POOLING, pf->state); + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + pci_disable_msix(pf->pdev); + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, + NE6X_MAX_MSIX_NUM); + if (actual_vector < NE6X_MAX_MSIX_NUM) { + clear_bit(NE6X_PF_MSIX, pf->state); + pci_disable_msix(pf->pdev); + dev_err(dev, "%s-%d: error msix enable failed\n", __func__, __LINE__); + } + + pf->irq_pile->num_entries = actual_vector; + } else { + if (!pf->irq_pile) { + size = struct_size(pf->irq_pile, list, actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + + pf->irq_pile->num_entries = actual_vector; + } + + test_and_set_bit(NE6X_PF_INTX, pf->state); + } + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + writeq(ciu_time_out_cdg.val, reg); + + ne6x_for_each_pf(pf, vid) { + adpt = pf->adpt[vid]; + if (recovery) { + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * vid; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = pf->hw.max_queue; + adpt->port_info->queue = adpt->port_info->hw_max_queue; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } else { + adpt->port_info->hw_queue_base_old = adpt->port_info->hw_queue_base; + adpt->port_info->hw_queue_base = NE6X_PF_VP1_NUM + vid; + 
adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = 1u; + adpt->port_info->queue = 1u; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } + + for (i = 0; i < adpt->num_queue; i++) { + adpt->rx_rings[i]->reg_idx = adpt->base_queue + i; + adpt->cq_rings[i]->reg_idx = adpt->rx_rings[i]->reg_idx; + adpt->tx_rings[i]->reg_idx = adpt->cq_rings[i]->reg_idx; + } + + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = + DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = + adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_vport(adpt); + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = + ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + ne6x_dev_set_port2pi(adpt); + rtnl_lock(); + + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_open(adpt); + + rtnl_unlock(); + } + + ne6x_init_link_irq(pf); + ne6x_enable_link_irq(pf); + + if (pooling) { + set_bit(NE6X_LINK_POOLING, pf->state); + ne6x_linkscan_schedule(pf); + } + + return 0; +} + +static int ne6x_ena_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + int ret; + + ret = ne6x_recycle_vp_resources(pf); + if (ret) + goto err_pci_disable_sriov; + + ret = ne6x_adpt_resetup(pf, false); + if (ret) + goto err_pci_disable_sriov; + + ne6x_clr_vf_bw_for_max_vpnum(pf); + ret = ne6x_alloc_vfs(pf, num_vfs); + if (ret) + goto err_pci_disable_sriov; + + if (ne6x_set_per_vf_res(pf)) { + dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", + num_vfs); + ret = -ENOSPC; + goto err_unroll_sriov; + } + + ne6x_set_dflt_settings_vfs(pf); + if (ne6x_start_vfs(pf, num_vfs)) { + dev_err(dev, "Failed to start VF(s)\n"); + ret = -EAGAIN; + goto err_unroll_sriov; + } + + ne6x_init_mailbox_irq(pf); + ne6x_send_init_mbx_mesg(pf); + clear_bit(NE6X_VF_DIS, pf->state); + + return 0; + +err_unroll_sriov: + kfree(pf->vf); + pf->vf = NULL; + pf->num_alloc_vfs = 0; +err_pci_disable_sriov: + pci_disable_sriov(pf->pdev); + + return ret; +} + +static int ne6x_pci_sriov_ena(struct ne6x_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = ne6x_pf_to_dev(pf); + int err; + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ne6x_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return 0; + + if (num_vfs > NE6X_MAX_VP_NUM) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", num_vfs, + NE6X_MAX_VP_NUM); + return -EOPNOTSUPP; + } + + err = ne6x_ena_vfs(pf, num_vfs); + if (err) { + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + return err; + } + + if (num_vfs) + test_and_set_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + return 0; +} + +int ne6x_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ne6x_pf *pf = pci_get_drvdata(pdev); + struct ne6x_adapter *adpt = NULL; + struct ne6x_vf *vf = NULL; + pbmp_t port_bitmap; + int err = 0, vf_id; + int timeout = 50; + int status; + + if (!(num_vfs == 0 || num_vfs == 2 || num_vfs == 4 || num_vfs == 8 || + num_vfs == 16 || num_vfs == 32 || num_vfs == 64)) + return -EINVAL; + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + dev_err(ne6x_pf_to_dev(pf), "ne6x irq number < %d!\n", NE6X_MAX_MSIX_NUM); + return 
-EPERM; + } + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + dev_warn(ne6x_pf_to_dev(pf), "ne6x config busy, timeout!\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + if (!num_vfs) { + set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + if (!pci_vfs_assigned(pdev)) { + ne6x_free_vfs(pf); + ne6x_disable_mailbox_irq(pf); + ne6x_free_mailbox_irq(pf); + ne6x_mbx_deinit_snapshot(&pf->hw); + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + if (!test_bit(NE6X_REMOVE, pf->state)) { + ne6x_recycle_vp_resources(pf); + err = ne6x_adpt_resetup(pf, true); + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + if (err) + goto err_recovery; + + return 0; + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return -EBUSY; + } + + status = ne6x_mbx_init_snapshot(&pf->hw, num_vfs); + if (status) + return ne6x_status_to_errno(status); + + err = ne6x_pci_sriov_ena(pf, num_vfs); + if (err) { + ne6x_mbx_deinit_snapshot(&pf->hw); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; + } + + PBMP_CLEAR(port_bitmap); + + /* config vport, default vlan */ + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + /* config default vlan */ + PBMP_PORT_ADD(port_bitmap, adpt->vport); + ne6x_dev_set_vport(adpt); + adpt->hw_feature = ne6x_dev_get_features(adpt); + } + + err = pci_enable_sriov(pf->pdev, num_vfs); + if (err) + goto err_hanler; + + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return num_vfs; + +err_hanler: + ne6x_dev_clear_vport(pf); + /* config vport, default vlan */ + ne6x_for_each_pf(pf, vf_id) { + adpt = pf->adpt[vf_id]; + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + ne6x_dev_set_vport(adpt); + } + + if (!pci_vfs_assigned(pdev)) { + ne6x_mbx_deinit_snapshot(&pf->hw); + ne6x_free_vfs(pf); + pf->num_alloc_vfs = 0; + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + } + +err_recovery: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; +} + +static int ne6x_validate_vf_id(struct ne6x_pf *pf, u16 vf_id) +{ + /* vf_id range is only valid for 0-255, and should always be unsigned */ + if (vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + return 0; +} + +static int ne6x_validate_outer_vf_id(struct ne6x_pf *pf, u16 out_vf_id) +{ + if (out_vf_id >= (pf->num_alloc_vfs / pf->num_alloc_adpt)) + return -EINVAL; + + return 0; +} + +int ne6x_sdk_send_msg_to_vf(struct ne6x_hw *hw, u16 vfid, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_pf *pf = hw->back; + struct ne6x_vf *vf = &pf->vf[vfid]; + int timeout = 2000; + int i; + + usnap.snap.state = v_retval; + usnap.snap.len = msglen; + usnap.snap.type = v_opcode; + + for (i = 0; i < msglen && i < 6; i++) + usnap.snap.data[i] = msg[i]; + + while (!(pf->hw.ne6x_mbx_ready_to_send[vfid])) { + usleep_range(100, 200); + timeout--; + if (!timeout) + break; + } + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), usnap.val); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_INT_REQ), (1ULL << vf->base_queue)); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_TRAVERSE; + pf->hw.ne6x_mbx_ready_to_send[vfid] = false; + + return 0; +} + +static int ne6x_vc_send_msg_to_vf(struct ne6x_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, + u8 *msg, u16 msglen) +{ + struct device *dev; + struct ne6x_pf *pf; + int aq_ret; 
+ + if (!vf) + return -EINVAL; + + pf = vf->pf; + dev = ne6x_pf_to_dev(pf); + + if (ne6x_validate_vf_id(pf, vf->vf_id)) { + dev_err(dev, "vf id[%d] is invalid\n", vf->vf_id); + return -EINVAL; + } + + /* single place to detect unsuccessful return values */ + if (v_retval) + dev_info(dev, "VF %d failed opcode %s, retval: %s\n", vf->vf_id, + ne6x_opcode_str(v_opcode), ne6x_mbox_status_str(v_retval)); + + aq_ret = ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen); + if (aq_ret) { + dev_info(dev, "Unable to send the message to VF %d aq_err %d\n", vf->vf_id, aq_ret); + return -EIO; + } + + return 0; +} + +static int ne6x_check_vf_init(struct ne6x_pf *pf, struct ne6x_vf *vf) +{ + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(ne6x_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n", vf->vf_id); + return -EBUSY; + } + + return 0; +} + +static int ne6x_vc_add_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + struct device *dev = ne6x_pf_to_dev(vf->pf); + u8 *mac_addr = vc_ether_addr->addr; + + if (!is_unicast_ether_addr(mac_addr)) { + dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return -EPERM; + } + + if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) { + dev_err(dev, "vf already use the same addr\n"); + return -EPERM; + } + + ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); + ne6x_adpt_add_mac(adpt, mac_addr, true); + + return 0; +} + +static int ne6x_vc_del_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 *mac) +{ + return ne6x_adpt_del_mac(adpt, mac, true); +} + +static int ne6x_vc_get_vf_res_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *vfres = NULL; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + int len, ret; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + + len = sizeof(union u_ne6x_mbx_snap_buffer_data); + vfres = kzalloc(len, GFP_KERNEL); + if (!vfres) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + len = 0; + goto err; + } + + vfres->snap.type = VIRTCHNL_OP_GET_VF_RESOURCES; + vfres->snap.data[0] = vf->vf_id; /* vport */ + vfres->snap.data[1] = pf_adpt->port_info->lport; /* lport */ + vfres->snap.data[2] = pf_adpt->port_info->hw_port_id; /* pport */ + vfres->snap.data[3] = pf_adpt->port_info->hw_queue_base; /* base_queue */ + vfres->snap.data[4] = pf->num_qps_per_vf; /* num_qps_per_vf */ + vfres->snap.data[5] = pf->num_alloc_vfs / pf->num_alloc_adpt; /* num vfs of per hw_port */ + vfres->snap.len = 6; + vf->ready = 0; + vf->adpt->port_info->phy.link_info.link_info = 0; + vf->ready_to_link_notify = 0; + set_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + +err: + /* send the response back to the VF */ + vfres->snap.state = v_ret; + ret = ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, + vfres->snap.state, + (u8 
*)vfres->snap.data, + vfres->snap.len); + + return ret; +} + +static int ne6x_vc_add_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_add_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_add_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_del_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_del_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_del_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_handle_mac_addr_msg(struct ne6x_vf *vf, u8 *msg, bool set) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *virtchnl_ether_addr); + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *usnap; + struct virtchnl_ether_addr eth_addr; + enum virtchnl_ops vc_op; + struct ne6x_adapter *adpt; + u8 *mac_addr; + int result; + + if (set) { + vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_add_mac_addr; + } else { + vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_del_mac_addr; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + mac_addr = usnap->snap.data; + + if (is_broadcast_ether_addr(mac_addr) || is_zero_ether_addr(mac_addr)) + goto handle_mac_exit; + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) + goto handle_mac_exit; + + ether_addr_copy(eth_addr.addr, mac_addr); + result = ne6x_vc_cfg_mac(vf, adpt, ð_addr); + if (result == -EEXIST || result == -ENOENT) { + goto handle_mac_exit; + } else if (result) { + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + goto handle_mac_exit; + } + +handle_mac_exit: + /* send the response to the VF */ + return ne6x_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); +} + +static int ne6x_vc_add_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, true); +} + +static int ne6x_vc_del_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, false); +} + +static int ne6x_vf_set_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d enable promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? 
"unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, true); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, true); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode off VF-%u mac: %d, trunk: 0x%x, failed, error: %d\n", + vf->vf_id, 0, adpt->port_info->hw_trunk_id, status); + return status; + } + + return 0; +} + +static int ne6x_vf_clear_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d clear promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? "unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, false); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, false); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; +} + +static int ne6x_vc_cfg_promiscuous_mode_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)usnap->snap.data; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + bool alluni = false, allmulti = false; + int ucast_err = 0, mcast_err = 0; + struct ne6x_pf *pf = vf->pf; + u8 mcast_m, ucast_m; + struct ne6x_adapter *adpt; + struct device *dev; + + if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + dev = ne6x_pf_to_dev(pf); + + if (info->flags & FLAG_VF_UNICAST_PROMISC) + alluni = true; + + if (info->flags & FLAG_VF_MULTICAST_PROMISC) + allmulti = true; + + mcast_m = NE6X_MCAST_PROMISC_BITS; + ucast_m = NE6X_UCAST_PROMISC_BITS; + + if (alluni) + ucast_err = ne6x_vf_set_adpt_promisc(vf, adpt, ucast_m); + else + ucast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, ucast_m); + + if (allmulti) + mcast_err = ne6x_vf_set_adpt_promisc(vf, adpt, mcast_m); + else + mcast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, mcast_m); + + if (!mcast_err) { + if (allmulti && !test_and_set_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", + vf->vf_id); + else if (!allmulti && test_and_clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", + vf->vf_id); + } + + if (!ucast_err) { + if (alluni && !test_and_set_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", + vf->vf_id); + else if (!alluni && test_and_clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", + vf->vf_id); + } + +error_param: + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, v_ret, NULL, 0); +} + +static bool ne6x_is_vf_link_up(struct ne6x_vf *vf) +{ + struct ne6x_port_info *pi = ne6x_vf_get_port_info(vf); + struct ne6x_pf *pf = vf->pf; + + if (ne6x_check_vf_init(pf, vf)) + return false; + + if (vf->link_forced) + return vf->link_up; + else + return pi->phy.link_info.link_info & 
NE6X_AQ_LINK_UP; +} + +u32 ne6x_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + switch (link_speed) { + case NE6X_LINK_SPEED_10GB: + speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + speed = NE6X_LINK_SPEED_100GB; + break; + default: + speed = NE6X_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} + +static void ne6x_set_pfe_link(struct ne6x_vf *vf, struct virtchnl_pf_event *pfe, + int ne6x_link_speed, bool link_up) +{ + pfe->link_status = link_up; + /* Speed in Mbps */ + if (link_up && vf->link_forced) + ne6x_link_speed = NE6X_LINK_SPEED_25GB; + + pfe->link_speed = ne6x_conv_link_speed_to_virtchnl(true, ne6x_link_speed); +} + +void ne6x_vc_notify_vf_link_state(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_port_info *pi; + u8 data[6] = {0}; + + pi = ne6x_vf_get_port_info(vf); + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + + if (ne6x_is_vf_link_up(vf)) + ne6x_set_pfe_link(vf, &pfe, pi->phy.link_info.link_speed, true); + else + ne6x_set_pfe_link(vf, &pfe, NE6X_LINK_SPEED_UNKNOWN, false); + + data[0] = pfe.event; + data[1] = (pfe.link_speed >> 24) & 0xff; + data[2] = (pfe.link_speed >> 16) & 0xff; + data[3] = (pfe.link_speed >> 8) & 0xff; + data[4] = (pfe.link_speed >> 0) & 0xff; + data[5] = pfe.link_status; + + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 6); +} + +void ne6x_vc_notify_link_state(struct ne6x_vf *vf) +{ + if (vf->ready_to_link_notify) + ne6x_vc_notify_vf_link_state(vf); +} + +static void ne6x_vc_notify_vf_reset(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe; + struct ne6x_pf *pf; + u8 data[6] = {0}; + + if (!vf) + return; + + pf = vf->pf; + if (ne6x_validate_vf_id(pf, vf->vf_id)) + return; + + /* Bail out if VF is in disabled state, neither initialized, nor active + * state - otherwise proceed with notifications + */ + if ((!test_bit(NE6X_VF_STATE_INIT, vf->vf_states) && + !test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) || + test_bit(NE6X_VF_STATE_DIS, vf->vf_states)) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + data[0] = pfe.event; + ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 1); +} + +static void ne6x_vc_notify_vf_trust_change(struct ne6x_vf *vf) +{ + struct virtchnl_vf_config vfconfig = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + u8 data[6] = {0}; + + dev = ne6x_pf_to_dev(pf); + vfconfig.type = VIRTCHNL_VF_CONFIG_TRUST; + if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag)) + vfconfig.data[0] = 1; + else + vfconfig.data[0] = 0; + + data[0] = vfconfig.type; + data[1] = vfconfig.data[0]; + dev_info(dev, "vfconfig_type = %d,data = %d\n", data[0], data[1]); + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_VF_CONFIG, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 2); +} + +bool ne6x_reset_vf(struct ne6x_vf *vf, bool is_vflr) +{ + struct ne6x_adapter *adpt; + + adpt = ne6x_get_vf_adpt(vf); + + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + ne6x_dis_vf_qs(vf); + + if (test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + clear_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + adpt->port_info->phy.link_info.link_info = 0x0; + if (is_vflr) + vf->rx_tx_state = false; + } + + if (test_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + 
clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states); + + if (test_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states); + + return 0; +} + +static void ne6x_vc_reset_vf(struct ne6x_vf *vf, bool update_tx_rx) +{ + ne6x_vc_notify_vf_reset(vf); + ne6x_reset_vf(vf, update_tx_rx); +} + +static int ne6x_vc_request_qs_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + u16 req_queues = (usnap->snap.data[1] << 8) | usnap->snap.data[0]; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u16 max_avail_vf_qps, max_allowed_vf_qps; + u8 req_reset = usnap->snap.data[2]; + bool need_update_rx_tx = false; + struct ne6x_pf *pf = vf->pf; + u16 tx_rx_queue_left; + u16 num_queue_pairs; + struct device *dev; + u16 cur_queues; + + ne6x_clear_vf_status(vf); + dev = ne6x_pf_to_dev(pf); + + if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + max_allowed_vf_qps = pf->num_qps_per_vf; + cur_queues = vf->num_vf_qs; + tx_rx_queue_left = cur_queues; + max_avail_vf_qps = tx_rx_queue_left + cur_queues; + + if (!req_queues) { + dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n", vf->vf_id); + } else if (req_queues > max_allowed_vf_qps) { + dev_err(dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, + max_allowed_vf_qps); + num_queue_pairs = max_allowed_vf_qps; + } else if (req_queues > cur_queues && req_queues - cur_queues > tx_rx_queue_left) { + dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, + req_queues - cur_queues, tx_rx_queue_left); + num_queue_pairs = min_t(u16, max_avail_vf_qps, max_allowed_vf_qps); + } else { + if (req_queues != vf->num_req_qs) { + vf->num_req_qs = req_queues; + need_update_rx_tx = true; + } + if (req_reset) { + ne6x_vc_reset_vf(vf, need_update_rx_tx); + } else { + vf->ready = false; + if (need_update_rx_tx) + vf->rx_tx_state = false; + + vf->adpt->port_info->phy.link_info.link_info = 0x0; + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); + } + + return 0; + } + +error_param: + /* send the response to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, v_ret, (u8 *)&num_queue_pairs, + 2); +} + +static int ne6x_vc_config_mtu_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + u16 *mtu; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + mtu = (u16 *)(rsvsnap->snap.data); + + dev = ne6x_pf_to_dev(pf); + dev_info(dev, "%s: mtu = %d\n", __func__, *mtu); + ne6x_dev_set_mtu(adpt, *mtu); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_MTU, v_ret, NULL, 0); +} + +struct virtchnl_vlan_info { + u16 vlan_id; + s16 flags; +}; + +static int ne6x_vc_config_vlan_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_vlan_info *dpdk_vlan; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + struct ne6x_vlan vlan; + int ret; + + dev = ne6x_pf_to_dev(pf); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + dpdk_vlan = (struct virtchnl_vlan_info *)rsvsnap->snap.data; + if (dpdk_vlan->flags) { + dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__, 
dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) { + dev_info(dev, "%s: add vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + dev_info(dev, "%s: add vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } + } else { + dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__, dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) { + dev_info(dev, "%s: del vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } else { + dev_info(dev, "%s: del vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } + } + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN, v_ret, NULL, 0); +} + +#define ETH_VLAN_STRIP_MASK 0x0001 +#define ETH_VLAN_FILTER_MASK 0x0002 +#define ETH_QINQ_STRIP_MASK 0x0008 +#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001 +#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020 +#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200 + +struct virtchnl_vlan_offload_info { + u16 mask; + u16 feature; +}; + +static int ne6x_vc_config_vlan_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_vlan_offload_info *offload; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + + dev = ne6x_pf_to_dev(pf); + adpt->hw_feature = ne6x_dev_get_features(adpt); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + offload = (struct virtchnl_vlan_offload_info *)rsvsnap->snap.data; + + if (offload->mask & ETH_VLAN_FILTER_MASK) { + dev_info(dev, "%s: ETH_VLAN_FILTER_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_FILTER) { + dev_info(dev, "%s: ETH_VLAN_FILTER ON\n", __func__); + adpt->hw_feature |= (NE6X_F_RX_VLAN_FILTER); + } else { + dev_info(dev, "%s: ETH_VLAN_FILTER OFF\n", __func__); + adpt->hw_feature &= ~(NE6X_F_RX_VLAN_FILTER); + } + } + + if (offload->mask & ETH_VLAN_STRIP_MASK) { + dev_info(dev, "%s: ETH_VLAN_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_STRIP) { + dev_info(dev, "%s: ETH_VLAN_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_VLAN_STRIP; + } else { + dev_info(dev, "%s: ETH_VLAN_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + } + } + + if (offload->mask & ETH_QINQ_STRIP_MASK) { + dev_info(dev, "%s: ETH_QINQ_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_QINQ_STRIP) { + dev_info(dev, "%s: ETH_QINQ_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_QINQ_STRIP; + } else { + dev_info(dev, "%s: ETH_QINQ_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + } + } + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD, v_ret, NULL, 0); +} + +struct virtchnl_flow_ctrl_info { + u16 mode; + u16 high_water; +}; + +enum rte_eth_fc_mode { + RTE_FC_NONE = 0, /**< Disable flow control. */ + RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */ + RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */ + RTE_FC_FULL /**< Enable flow control on both side. 
*/
+};
+
+static int ne6x_vc_config_flow_ctrl_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct virtchnl_flow_ctrl_info *flow;
+	struct ne6x_adapter *adpt = vf->adpt;
+	struct ne6x_flowctrl flowctrl = {0};
+	struct ne6x_pf *pf = vf->pf;
+	struct device *dev;
+	int ret;
+
+	dev = ne6x_pf_to_dev(pf);
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+	flow = (struct virtchnl_flow_ctrl_info *)rsvsnap->snap.data;
+	if (flow->mode == RTE_FC_FULL) {
+		flowctrl.rx_pause = 1;
+		flowctrl.tx_pause = 1;
+	} else if (flow->mode == RTE_FC_RX_PAUSE) {
+		flowctrl.rx_pause = 1;
+	} else if (flow->mode == RTE_FC_TX_PAUSE) {
+		flowctrl.tx_pause = 1;
+	} else {
+		flowctrl.rx_pause = 0;
+		flowctrl.tx_pause = 0;
+	}
+
+	dev_info(dev, "%s: mode = %d high water = %d\n", __func__, flow->mode, flow->high_water);
+	ret = ne6x_dev_set_flowctrl(adpt, &flowctrl);
+	if (ret) {
+		dev_info(dev, "%s: set flow ctrl failed\n", __func__);
+		v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+	}
+
+	ret = ne6x_dev_set_vf_bw(adpt, flow->high_water);
+	if (ret)
+		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_FLOW_CTRL, v_ret, NULL, 0);
+}
+
+static int ne6x_vc_config_rss_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct ne6x_adapter *adpt = vf->adpt;
+	u8 *data = (u8 *)&adpt->rss_info;
+	int i;
+
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+
+	for (i = 0; i < rsvsnap->snap.len; i++) {
+		data[adpt->rss_size] = rsvsnap->snap.data[i];
+		adpt->rss_size++;
+	}
+
+	if (adpt->rss_size >= sizeof(struct ne6x_rss_info)) {
+		adpt->rss_size = 0;
+		ne6x_dev_set_rss(adpt, &adpt->rss_info);
+	}
+
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS, v_ret, NULL, 0);
+}
+
+static int ne6x_vc_changed_rss_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	union u_ne6x_mbx_snap_buffer_data *rsvsnap;
+	struct ne6x_adapter *adpt = vf->adpt;
+	int i, ret;
+
+	rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg;
+	memcpy(&adpt->num_queue, rsvsnap->snap.data, sizeof(adpt->num_queue));
+
+	if (adpt->rss_info.ind_table_size > NE6X_RSS_MAX_IND_TABLE_SIZE)
+		adpt->rss_info.ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE;
+
+	for (i = 0; i < adpt->rss_info.ind_table_size; i++)
+		adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue);
+
+	ret = ne6x_dev_set_rss(adpt, &adpt->rss_info);
+	ret |= ne6x_dev_add_unicast_for_fastmode(adpt, vf->dev_lan_addr.addr);
+	ret |= ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CHANGED_RSS,
+				      VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+
+	return ret;
+}
+
+static int ne6x_vc_add_vlan_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	struct ne6x_vlan vlan;
+	u16 vlan_tpid = 0;
+	u16 vlan_id = 0;
+
+	vlan_id = *((u16 *)msg);
+	vlan_tpid = *((u16 *)(msg + 2));
+	dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n",
+		 __func__, vlan_tpid, vlan_id);
+
+	vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0);
+
+	dev_info(&vf->pf->pdev->dev, "%s:vfp_vid %04x\n", __func__, vf->vfp_vid);
+
+	ne6x_adpt_add_vlan(vf->adpt, vlan);
+
+	return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ne6x_vc_del_vlan_msg(struct ne6x_vf *vf, u8 *msg)
+{
+	struct ne6x_vlan vlan;
+	u16 vlan_tpid = 0;
+	u16 vlan_id = 0;
+
+	vlan_id = *((u16 *)msg);
+	vlan_tpid = *((u16 *)(msg + 2));
+
+	dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n", __func__, vlan_tpid,
+		 
vlan_id); + vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0); + + ne6x_adpt_del_vlan(vf->adpt, vlan); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_vc_config_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = rsvsnap->snap.data[3]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[2]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[1]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[0]; + + if (vf->tx_rate) + adpt->hw_feature |= NE6X_F_TX_QOSBANDWIDTH; + else + adpt->hw_feature &= ~NE6X_F_TX_QOSBANDWIDTH; + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_OFFLOAD, VIRTCHNL_STATUS_SUCCESS, NULL, + 0); +} + +static int ne6x_vc_request_feature_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = ne6x_dev_get_features(adpt); + dev_info(&vf->pf->pdev->dev, "%s: vf->vf_id =%d vport = %d lport = %d pport = %d hw_queue_base = %d hw_feature = %08X\n", + __func__, vf->vf_id, adpt->vport, adpt->port_info->lport, + adpt->port_info->hw_port_id, adpt->port_info->hw_queue_base, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_FEATURE, VIRTCHNL_STATUS_SUCCESS, + (u8 *)&adpt->hw_feature, sizeof(u32)); +} + +static int ne6x_vc_reset_vf_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + + vf->ready = false; + vf->rx_tx_state = 0; + vf->adpt->port_info->phy.link_info.link_info = false; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + + vf->port_vlan_info = NE6X_VLAN(0, 0, 0); + vf->link_forced = false; + vf->trusted = false; + vf->tx_rate = 0; + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_RESET_VF, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_get_logic_vf_id(struct net_device *netdev, int vf_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + + return (adpt->idx * (pf->num_alloc_vfs / pf->num_alloc_adpt) + vf_id); +} + +int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + struct ne6x_vf *vf; + int logic_vf_id; + int ret = 0; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + if (logic_vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + + netdev_info(netdev, "set vf-%d trust %s\n", vf_id, trusted ? 
"on" : "off"); + + if (!vf) { + netdev_err(netdev, "vf is NULL\n"); + return -EINVAL; + } + + /* Check if already ready ?*/ + if (!vf->ready) { + netdev_err(netdev, "vf is not ready\n"); + return (-1); + } + + /* Check if already trusted */ + if (trusted == vf->trusted) + return 0; + + vf->trusted = trusted; + + if (vf->trusted) { + set_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + } else { + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vf_clear_adpt_promisc(vf, ne6x_get_vf_adpt(vf), + NE6X_UCAST_PROMISC_BITS | + NE6X_MCAST_PROMISC_BITS); + } + + ne6x_vc_notify_vf_trust_change(vf); + dev_info(ne6x_pf_to_dev(pf), "VF %u is now %strusted\n", + logic_vf_id, trusted ? "" : "un"); + + return 0; +} + +int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + int ret = 0, logic_vf_id; + struct ne6x_vf *vf; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + if (!vf) + return -EINVAL; + + netdev_info(netdev, "set vf-%d link state %s\n", vf_id, + link_state == IFLA_VF_LINK_STATE_ENABLE + ? "enable" + : (link_state == IFLA_VF_LINK_STATE_DISABLE ? "disable" : "auto")); + + /* Check if already ready ?*/ + if (!vf->ready) + return (-1); + + if (!vf->trusted) + return (-1); + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; + default: + ret = -EINVAL; + goto out_put_vf; + } + + ne6x_vc_notify_vf_link_state(vf); + +out_put_vf: + return ret; +} + +static int ne6x_vc_modify_vf_mac(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + if (!pf->adpt) + dev_info(ne6x_pf_to_dev(pf), "adpt is null vf %d\n", vf->vf_id); + + /* set zero addr mean clear mac */ + if (is_zero_ether_addr(vc_ether_addr.addr)) + return ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + + if (is_valid_ether_addr(vf->dev_lan_addr.addr)) { + ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + memset(vf->dev_lan_addr.addr, 0, 6); + } + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + +err: + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_VF_ADDR, v_ret, vc_ether_addr.addr, 6); +} + +static int ne6x_vc_set_fast_mode(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_pf *pf = vf->pf; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + + if (rsvsnap->snap.data[0]) { + vf->adpt->num_queue = rsvsnap->snap.data[1]; + v_ret = ne6x_dev_set_fast_mode(pf, true, 
vf->adpt->num_queue); + } else { + v_ret = ne6x_dev_set_fast_mode(pf, false, 0); + } + + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_FAST_MDOE, v_ret, NULL, 0); +} + +void ne6x_vc_process_vf_msg(struct ne6x_pf *pf) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_hw *hw = &pf->hw; + struct ne6x_vf *vf = NULL; + struct ne6x_vlan vlan; + struct device *dev; + int err = 0; + int i; + + dev = ne6x_pf_to_dev(pf); + ne6x_for_each_vf(pf, i) { + if (pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i]) { + vf = &pf->vf[i]; + usnap.val = rd64_bar4(hw, NE6X_VF_MAILBOX_ADDR(vf->base_queue)); + WARN(usnap.snap.len > 6, ">>>>>>>>>>>>>>>>>>recv VF mailbox error!!!<<<<<<<<<<<<<<<<<<<"); + switch (usnap.snap.type) { + case VIRTCHNL_OP_GET_VF_RESOURCES: + err = ne6x_vc_get_vf_res_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + vf->ready = 1; + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + err = ne6x_vc_add_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + err = ne6x_vc_del_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_ADD_VLAN: + err = ne6x_vc_add_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_DEL_VLAN: + err = ne6x_vc_del_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ne6x_vc_cfg_promiscuous_mode_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_EVENT: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ne6x_vc_request_qs_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_RSS: + err = ne6x_vc_config_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN: + err = ne6x_vc_config_vlan_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD: + err = ne6x_vc_config_vlan_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_MTU: + err = ne6x_vc_config_mtu_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_FLOW_CTRL: + err = ne6x_vc_config_flow_ctrl_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CHANGED_RSS: + err = ne6x_vc_changed_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_OFFLOAD: + err = ne6x_vc_config_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_VF_FEATURE: + err = ne6x_vc_request_feature_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_RESET_VF: + err = ne6x_vc_reset_vf_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_PORT_STATUS: + ne6x_dev_add_broadcast_leaf(ne6x_get_vf_adpt(vf)); + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(ne6x_get_vf_adpt(vf), vlan); + ne6x_vc_notify_vf_link_state(vf); + + if (!vf->ready_to_link_notify) + vf->ready_to_link_notify = 1; + + ne6x_linkscan_schedule(pf); + break; + case VIRTCHNL_OP_SET_VF_ADDR: + err = ne6x_vc_modify_vf_mac(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_SET_FAST_MDOE: + err = ne6x_vc_set_fast_mode(vf, (u8 *)&usnap); + break; + /* VIRTCHNL_OP_VERSION not used */ + default: + dev_err(dev, "Unsupported opcode %s from VF %d\n", + ne6x_opcode_str(usnap.snap.type), i); + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); + break; + } + pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = false; + } + 
if (err) + /* Helper function cares less about error return values here + * as it is busy with pending work. + */ + dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", i, + usnap.snap.type, err); + } + + if (test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state)) + clear_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state); +} + +int ne6x_get_vf_config(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + struct ne6x_vf *vf; + int logic_vfid = 0; + int ret = 0; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error_param; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + /* first adpt is always the LAN adpt */ + adpt = pf->adpt[vf->lan_adpt_idx]; + if (!adpt) { + ret = -ENOENT; + goto error_param; + } + + ivi->vf = vf_id; + + ether_addr_copy(ivi->mac, vf->dev_lan_addr.addr); + + ivi->vlan = vf->port_vlan_info.vid; + ivi->qos = vf->port_vlan_info.prio; + if (vf->port_vlan_info.vid) + ivi->vlan_proto = cpu_to_be16(vf->port_vlan_info.tpid); + + if (!vf->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + + ivi->max_tx_rate = vf->tx_rate; + ivi->min_tx_rate = 0; + if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag)) + ivi->trusted = 1; + else + ivi->trusted = 0; + +error_param: + return ret; +} + +static void ne6x_calc_token_for_bw(int max_tx_rate, int *time_inv, int *tocken) +{ + if (max_tx_rate <= 100) { + *time_inv = 3910; + *tocken = max_tx_rate; + } else if (max_tx_rate <= 1000) { + *time_inv = 790; + *tocken = max_tx_rate / 5; + } else if (max_tx_rate < 5000) { + *time_inv = 395; + *tocken = max_tx_rate / 10; + } else if (max_tx_rate < 10000) { + *time_inv = 118; + *tocken = max_tx_rate / 33; + } else { + *time_inv = 39; + *tocken = max_tx_rate / 100; + } +} + +int ne6x_set_vf_bw_for_max_vpnum(struct ne6x_pf *pf, int vf_id, int max_tx_rate) +{ + union ne6x_sq_meter_cfg0 sq_meter_cfg0; + union ne6x_sq_meter_cfg1 sq_meter_cfg1; + union ne6x_sq_meter_cfg2 sq_meter_cfg2; + union ne6x_sq_meter_cfg3 sq_meter_cfg3; + struct ne6x_hw *hw = &pf->hw; + int time_inv = 0; + int tocken = 0; + + sq_meter_cfg3.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3)); + sq_meter_cfg3.reg.csr_meter_pause_threshold_vp = 1; + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3), sq_meter_cfg3.val); + sq_meter_cfg2.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2)); + sq_meter_cfg2.reg.csr_meter_resume_threshold_vp = 1; + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2), sq_meter_cfg2.val); + + sq_meter_cfg1.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1)); + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = max_tx_rate; + + if (max_tx_rate) { + ne6x_calc_token_for_bw(max_tx_rate, &time_inv, &tocken); + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = tocken; + sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = time_inv; + } else { + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = 0x1; + sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = 0x1; + } + + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1), sq_meter_cfg1.val); + sq_meter_cfg0.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0)); + sq_meter_cfg0.reg.csr_meter_pkt_token_num_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_ipg_len_vp = 0x0; + 
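+	/* refresh/rate-limit enables are finalized below based on max_tx_rate */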
sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_packet_mode_vp = 0x0; + + if (max_tx_rate) { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + } else { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x0; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x0; + } + + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0), sq_meter_cfg0.val); + + return 0; +} + +void ne6x_clr_vf_bw_for_max_vpnum(struct ne6x_pf *pf) +{ + int index; + + for (index = 0; index < NE6X_MAX_VP_NUM; index++) + ne6x_set_vf_bw_for_max_vpnum(pf, index, 0); +} + +int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_pf *pf = np->adpt->back; + struct ne6x_adapter *adpt; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + ret = -EINVAL; + goto error; + } + + ret = ne6x_validata_tx_rate(adpt, logic_vfid, min_tx_rate, max_tx_rate); + if (ret) { + ret = -EINVAL; + goto error; + } + + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", logic_vfid); + ret = -EAGAIN; + goto error; + } + + if (pf->num_alloc_vfs == 64) + ret = ne6x_set_vf_bw_for_max_vpnum(pf, logic_vfid, max_tx_rate); + else + ret = ne6x_dev_set_vf_bw(adpt, max_tx_rate); + + if (ret) + goto error; + + vf->tx_rate = max_tx_rate; + + return 0; +error: + return ret; +} + +int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error_param; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + + adpt = ne6x_get_vf_adpt(vf); + if (!is_valid_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (is_multicast_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac)) { + dev_err(&pf->pdev->dev, "already use the same Ethernet address %pM for VF %d\n", + mac, vf_id); + goto error_param; + } + + /*simluate a msg from vf*/ + usnap.snap.type = VIRTCHNL_OP_SET_VF_ADDR; + usnap.snap.state = VIRTCHNL_STATUS_SUCCESS; + usnap.snap.len = 6; + memcpy(usnap.snap.data, mac, usnap.snap.len); + ret = ne6x_vc_modify_vf_mac(vf, (u8 *)&usnap); + +error_param: + return ret; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h new file mode 100644 index 000000000000..2f094d164fe3 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_VIRTCHNL_PF_H +#define _NE6X_VIRTCHNL_PF_H + +#include "mailbox.h" + +#define NE6X_NO_ADPT 0xffff + +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + VIRTCHNL_EVENT_DCF_ADPT_MAP_UPDATE, +}; + +struct virtchnl_pf_event { + u8 event; + u32 link_speed; + u8 link_status; +}; + +union u_ne6x_mbx_snap_buffer_data { + struct ne6x_mbx_snap_buffer_data snap; + u64 val; +}; + +/* Specific VF states */ +enum ne6x_vf_states { + NE6X_VF_STATE_INIT = 0, /* PF is initializing VF */ + NE6X_VF_STATE_ACTIVE, /* VF resources are allocated for use */ + NE6X_VF_STATE_QS_ENA, /* VF queue(s) enabled */ + NE6X_VF_STATE_DIS, + NE6X_VF_STATE_MC_PROMISC, + NE6X_VF_STATE_UC_PROMISC, + NE6X_VF_STATES_NBITS +}; + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; +}; + +struct virtchnl_promisc_info { + u16 adpt_id; + u16 flags; +}; + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +enum ne6x_promisc_flags { + NE6X_PROMISC_UCAST_RX = 0x1, + NE6X_PROMISC_UCAST_TX = 0x2, + NE6X_PROMISC_MCAST_RX = 0x4, + NE6X_PROMISC_MCAST_TX = 0x8, + NE6X_PROMISC_BCAST_RX = 0x10, + NE6X_PROMISC_BCAST_TX = 0x20, + NE6X_PROMISC_VLAN_RX = 0x40, + NE6X_PROMISC_VLAN_TX = 0x80, +}; + +#define NE6X_UCAST_PROMISC_BITS (NE6X_PROMISC_UCAST_TX | NE6X_PROMISC_UCAST_RX) +#define NE6X_MCAST_PROMISC_BITS (NE6X_PROMISC_MCAST_TX | NE6X_PROMISC_MCAST_RX) + +enum ne6x_vf_config_flag { + NE6X_VF_CONFIG_FLAG_TRUSTED = 0, + NE6X_VF_CONFIG_FLAG_LINK_FORCED, + NE6X_VF_CONFIG_FLAG_NBITS /* must be last */ +}; + +struct ne6x_key { + u8 rsv0; + u8 pi; + u8 mac_addr[6]; + u8 rsv1[56]; +}; + +/* VF information structure */ +struct ne6x_vf { + struct ne6x_pf *pf; + struct ne6x_adapter *adpt; + + u16 vf_id; /* VF ID in the PF space */ + u16 lan_adpt_idx; /* index into PF struct */ + /* first vector index of this VF in the PF space */ + u16 vfp_vid; + u16 vfp_tpid; + int tx_rate; + u8 rx_tx_state; + bool ready; + bool ready_to_link_notify; + + u16 base_queue; + u16 num_vf_qs; + u16 num_req_qs; + + struct ne6x_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */ + + u8 trusted : 1; + u8 link_forced : 1; + u8 link_up : 1; /* only valid if VF link is forced */ + + struct virtchnl_ether_addr dev_lan_addr; + DECLARE_BITMAP(vf_states, NE6X_VF_STATES_NBITS); /* VF runtime states */ + DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX); + DECLARE_BITMAP(vf_config_flag, NE6X_VF_CONFIG_FLAG_NBITS); +}; + +#define ne6x_for_each_vf(pf, i) for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++) +#define ne6x_for_each_pf(pf, i) for ((i) = 0; (i) < (pf)->num_alloc_adpt; (i)++) + +#ifdef CONFIG_PCI_IOV +int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted); +int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); + +int ne6x_sriov_configure(struct pci_dev *pdev, int num_vfs); +void ne6x_vc_process_vf_msg(struct ne6x_pf *pf); +void ne6x_vc_notify_link_state(struct ne6x_vf *vf); +int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); +void ne6x_clr_vf_bw_for_max_vpnum(struct ne6x_pf *pf); + +struct ne6x_adapter *ne6x_get_vf_adpt(struct ne6x_vf *vf); +int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate); +int ne6x_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); + +#else /* CONFIG_PCI_IOV */ +static inline int ne6x_sriov_configure(struct pci_dev __always_unused *pdev, + int __always_unused 
num_vfs) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, + int min_tx_rate, int max_tx_rate) +{ + return -EOPNOTSUPP; +} + +static inline int ne6x_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_PCI_IOV */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6x_trace.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6x_trace.h new file mode 100644 index 000000000000..882ec242f7bb --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6x_trace.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef CONFIG_TRACEPOINTS +#if !defined(_NE6X_VF_TRACE_H_) +#define _NE6X_VF_TRACE_H_ + +#define ne6x_trace(trace_name, args...) +#define ne6x_trace_enabled(trace_name) (0) +#endif /* !defined(_NE6X_VF_TRACE_H_) */ +#else /* CONFIG_TRACEPOINTS */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ne6xvf + +#if !defined(_NE6X_VF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _NE6X_VF_TRACE_H_ + +#include +#include "trace_comm.h" +#endif /* _NE6X_TRACE_H_ */ +/* This must be outside ifdef _NE6X_VF_TRACE_H_ */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE ne6x_trace +#include +#endif /* CONFIG_TRACEPOINTS */ diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h new file mode 100644 index 000000000000..9ee06262f0fb --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h @@ -0,0 +1,555 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_H +#define _NE6XVF_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "common.h" +#include "feature.h" +#include "txrx.h" +#include "mailbox.h" +#include "ne6xvf_virtchnl.h" + +#define NE6XVF_MAX_AQ_BUF_SIZE 4096 +#define NE6XVF_AQ_LEN 32 +#define NE6XVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */ + +#define NE6XVF_REG_ADDR(_VPID, _OFST) (((_VPID) << 12) + ((_OFST) << 4)) + +#define NE6XVF_DB_STATE 0x1a +#define NE6XVF_MAILBOX_DATA 0x19 +#define NE6XVF_PF_MAILBOX_DATA 0x18 + +#define NE6XVF_QC_TAIL1(_Q) (((_Q) << 12) | (NE6X_CQ_HD_POINTER << 4)) /* _i=0...15 Reset: PFR */ +#define NE6XVF_QTX_TAIL1(_Q) (((_Q) << 12) | (0 << 11) | 0) /* _i=0...15 Reset: PFR */ +#define NE6XVF_QRX_TAIL1(_Q) (((_Q) << 12) | (1 << 11) | 0) /* _i=0...15 Reset: PFR */ + +#define ne6xvf_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("ncevf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__); \ +} while (0) + +#define hw_dbg(h, s, ...) 
\ + pr_debug("ncevf %02x:%02x.%x " s, \ + (h)->bus.bus_id, (h)->bus.device, \ + (h)->bus.func, ##__VA_ARGS__) + +extern char ne6xvf_driver_name[]; +extern const char ne6xvf_driver_version[]; +extern struct workqueue_struct *ne6xvf_wq; + +#define ne6xvf_init_spinlock(_sp) ne6xvf_init_spinlock_d(_sp) +#define ne6xvf_acquire_spinlock(_sp) ne6xvf_acquire_spinlock_d(_sp) +#define ne6xvf_release_spinlock(_sp) ne6xvf_release_spinlock_d(_sp) +#define ne6xvf_destroy_spinlock(_sp) ne6xvf_destroy_spinlock_d(_sp) + +#define wr64(a, reg, value) writeq((value), ((a)->hw_addr0 + (reg))) +#define rd64(a, reg) readq((a)->hw_addr0 + (reg)) + +#define NE6XVF_READ_REG(hw, reg) rd64(hw, reg) +#define NE6XVF_WRITE_REG(hw, reg, value) wr64(hw, reg, value) + +#define NE6XVF_MAX_REQ_QUEUES 32 + +#define NE6XVF_RESET_WAIT_MS 10 +#define NE6XVF_RESET_WAIT_DETECTED_COUNT 50 +#define NE6XVF_RESET_WAIT_COMPLETE_COUNT 2000 + +enum ne6xvf_critical_section_t { + __NE6XVF_IN_CRITICAL_TASK, /* cannot be interrupted */ + __NE6XVF_IN_REMOVE_TASK, /* device being removed */ + __NE6XVF_TX_TSTAMP_IN_PROGRESS, /* PTP Tx timestamp request in progress */ +}; + +struct ne6xvf_vlan_filter { + struct list_head list; + struct ne6x_vf_vlan vlan; + struct { + u8 is_new_vlan : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + }; +}; + +struct ne6xvf_mac_filter { + struct list_head list; + u8 macaddr[ETH_ALEN]; + struct { + u8 is_new_mac : 1; /* filter is new, wait for PF decision */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 is_primary : 1; /* filter is a default VF MAC */ + u8 add_handled : 1; /* received response from PF for filter add */ + u8 padding : 3; + }; +}; + +/* Driver state. The order of these is important! 
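+ * States from __NE6XVF_DOWN onward are the ones where the watchdog is running.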
*/ +enum ne6xvf_state_t { + __NE6XVF_STARTUP, /* driver loaded, probe complete */ + __NE6XVF_REMOVE, /* driver is being unloaded */ + __NE6XVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ + __NE6XVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */ + __NE6XVF_INIT_CONFIG_ADAPTER, + __NE6XVF_INIT_SW, /* got resources, setting up structs */ + __NE6XVF_INIT_FAILED, /* init failed, restarting procedure */ + __NE6XVF_RESETTING, /* in reset */ + __NE6XVF_COMM_FAILED, /* communication with PF failed */ + /* Below here, watchdog is running */ + __NE6XVF_DOWN, /* ready, can be opened */ + __NE6XVF_DOWN_PENDING, /* descending, waiting for watchdog */ + __NE6XVF_TESTING, /* in ethtool self-test */ + __NE6XVF_RUNNING /* opened, working */ +}; + +struct ne6xvf_mac_info { + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +enum ne6xvf_bus_speed { + ne6xvf_bus_speed_unknown = 0, + ne6xvf_bus_speed_33 = 33, + ne6xvf_bus_speed_66 = 66, + ne6xvf_bus_speed_100 = 100, + ne6xvf_bus_speed_120 = 120, + ne6xvf_bus_speed_133 = 133, + ne6xvf_bus_speed_2500 = 2500, + ne6xvf_bus_speed_5000 = 5000, + ne6xvf_bus_speed_8000 = 8000, + ne6xvf_bus_speed_reserved +}; + +enum ne6xvf_bus_width { + ne6xvf_bus_width_unknown = 0, + ne6xvf_bus_width_pcie_x1 = 1, + ne6xvf_bus_width_pcie_x2 = 2, + ne6xvf_bus_width_pcie_x4 = 4, + ne6xvf_bus_width_pcie_x8 = 8, + ne6xvf_bus_width_32 = 32, + ne6xvf_bus_width_64 = 64, + ne6xvf_bus_width_reserved +}; + +enum ne6xvf_bus_type { + ne6xvf_bus_type_unknown = 0, + ne6xvf_bus_type_pci, + ne6xvf_bus_type_pcix, + ne6xvf_bus_type_pci_express, + ne6xvf_bus_type_reserved +}; + +struct ne6xvf_bus_info { + enum ne6xvf_bus_speed speed; + enum ne6xvf_bus_width width; + enum ne6xvf_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +struct ne6xvf_hw_capabilities { + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors_vf; + u32 max_mtu; + u32 chip_id; + u32 mac_id; + u32 lport; + u32 vf_id; + u32 num_vf_per_pf; +}; + +struct ne6xvf_hw { + u8 __iomem *hw_addr0; + u8 __iomem *hw_addr2; + void *back; + + /* subsystem structs */ + struct ne6xvf_mac_info mac; + struct ne6xvf_bus_info bus; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* capabilities for entire device and PCI func */ + struct ne6xvf_hw_capabilities dev_caps; + + struct ne6xvf_sdk_mbx_info mbx; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +struct ne6xvf_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; + +#define NE6XVF_FLAG_RX_CSUM_ENABLED BIT(0) +#define NE6XVF_FLAG_PF_COMMS_FAILED BIT(3) +#define NE6XVF_FLAG_RESET_PENDING BIT(4) +#define NE6XVF_FLAG_RESET_NEEDED BIT(5) +#define NE6XVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) +#define NE6XVF_FLAG_PROMISC_ON BIT(13) +#define NE6XVF_FLAG_ALLMULTI_ON BIT(14) + +#define NE6XVF_FLAG_LEGACY_RX BIT(15) +#define NE6XVF_FLAG_REINIT_ITR_NEEDED BIT(16) +#define NE6XVF_FLAG_QUEUES_ENABLED BIT(17) +#define NE6XVF_FLAG_QUEUES_DISABLED BIT(18) +#define NE6XVF_FLAG_REINIT_MSIX_NEEDED BIT(20) +#define NE6XF_FLAG_REINIT_CHNL_NEEDED 
BIT(21) +#define NE6XF_FLAG_RESET_DETECTED BIT(22) +#define NE6XF_FLAG_INITIAL_MAC_SET BIT(23) + +#define NE6XVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0) +#define NE6XVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2) +#define NE6XVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3) +#define NE6XVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4) +#define NE6XVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5) +#define NE6XVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6) +#define NE6XVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7) +#define NE6XVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8) +#define NE6XVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */ +#define NE6XVF_FLAG_AQ_GET_CONFIG BIT_ULL(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define NE6XVF_FLAG_AQ_GET_HENA BIT_ULL(11) +#define NE6XVF_FLAG_AQ_SET_HENA BIT_ULL(12) +#define NE6XVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) +#define NE6XVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) +#define NE6XVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) +#define NE6XVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) +#define NE6XVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) +#define NE6XVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) + +#define NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD BIT_ULL(38) +#define NE6XVF_FLAG_AQ_GET_FEATURE BIT_ULL(39) +#define NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS BIT_ULL(40) +#define NE6XVF_FLAG_AQ_SET_VF_MAC BIT_ULL(41) +#define NE6XVF_FLAG_AQ_CHANGED_RSS BIT_ULL(42) + +struct ne6xvf_adapter { + struct ne6x_adapt_comm comm; + struct work_struct sdk_task; + struct delayed_work watchdog_task; + wait_queue_head_t down_waitqueue; + wait_queue_head_t vc_waitqueue; + struct ne6x_q_vector *q_vectors; + struct list_head vlan_filter_list; + struct list_head mac_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + char misc_vector_name[IFNAMSIZ + 9]; + u16 max_queues; + u16 num_active_queues; + u16 num_req_queues; + u32 hw_feature; + struct ne6x_ring *tg_rings; /* TG */ + struct ne6x_ring *cq_rings; /* CQ */ + u32 cq_desc_count; + + /* TX */ + struct ne6x_ring *tx_rings; + u32 tx_timeout_count; + u32 tx_desc_count; + + /* RX */ + struct ne6x_ring *rx_rings; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + struct msix_entry *msix_entries; + + u32 flags; + + /* duplicates for common code */ +#define NE6XVF_FLAG_DCB_ENABLED 0 + + /* flags for admin queue service task */ + u64 aq_required; + + /* Lock to prevent possible clobbering of + * current_netdev_promisc_flags + */ + spinlock_t current_netdev_promisc_flags_lock; + + netdev_features_t current_netdev_promisc_flags; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + struct net_device_stats net_stats; + + struct ne6xvf_hw hw; /* defined in ne6xvf.h */ + + enum ne6xvf_state_t state; + enum ne6xvf_state_t last_state; + unsigned long crit_section; + + bool netdev_registered; + bool link_up; + enum ne6x_sdk_link_speed link_speed; + enum virtchnl_ops current_op; + struct virtchnl_vf_resource *vf_res; + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + + struct ne6xvf_eth_stats current_stats; + //struct ne6xvf_vsi vsi; + u16 msg_enable; + struct ne6x_rss_info rss_info; + u8 trusted; + +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6xvf_dbg_pf; +#endif /* CONFIG_DEBUG_FS */ +}; + +#ifdef CONFIG_DEBUG_FS +#define NCE_DEBUG_CHAR_LEN 1024 + +struct ne6xvf_dbg_cmd_wr { + char command[NCE_DEBUG_CHAR_LEN]; + void (*command_proc)(struct ne6xvf_adapter *pf); +}; + +void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf); +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf); 
+void ne6xvf_dbg_init(void); +void ne6xvf_dbg_exit(void); +#else +static inline void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf) { } +static inline void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf) { } +static inline void ne6xvf_dbg_init(void) { } +static inline void ne6xvf_dbg_exit(void) { } +#endif /* CONFIG_DEBUG_FS */ + +/* Error Codes */ +enum ne6xvf_status { + NE6XVF_SUCCESS = 0, + NE6XVF_ERR_NVM = -1, + NE6XVF_ERR_NVM_CHECKSUM = -2, + NE6XVF_ERR_PHY = -3, + NE6XVF_ERR_CONFIG = -4, + NE6XVF_ERR_PARAM = -5, + NE6XVF_ERR_MAC_TYPE = -6, + NE6XVF_ERR_UNKNOWN_PHY = -7, + NE6XVF_ERR_LINK_SETUP = -8, + NE6XVF_ERR_ADAPTER_STOPPED = -9, + NE6XVF_ERR_INVALID_MAC_ADDR = -10, + NE6XVF_ERR_DEVICE_NOT_SUPPORTED = -11, + NE6XVF_ERR_MASTER_REQUESTS_PENDING = -12, + NE6XVF_ERR_INVALID_LINK_SETTINGS = -13, + NE6XVF_ERR_AUTONEG_NOT_COMPLETE = -14, + NE6XVF_ERR_RESET_FAILED = -15, + NE6XVF_ERR_SWFW_SYNC = -16, + NE6XVF_ERR_NO_AVAILABLE_VSI = -17, + NE6XVF_ERR_NO_MEMORY = -18, + NE6XVF_ERR_BAD_PTR = -19, + NE6XVF_ERR_RING_FULL = -20, + NE6XVF_ERR_INVALID_PD_ID = -21, + NE6XVF_ERR_INVALID_QP_ID = -22, + NE6XVF_ERR_INVALID_CQ_ID = -23, + NE6XVF_ERR_INVALID_CEQ_ID = -24, + NE6XVF_ERR_INVALID_AEQ_ID = -25, + NE6XVF_ERR_INVALID_SIZE = -26, + NE6XVF_ERR_INVALID_ARP_INDEX = -27, + NE6XVF_ERR_INVALID_FPM_FUNC_ID = -28, + NE6XVF_ERR_QP_INVALID_MSG_SIZE = -29, + NE6XVF_ERR_QP_TOOMANY_WRS_POSTED = -30, + NE6XVF_ERR_INVALID_FRAG_COUNT = -31, + NE6XVF_ERR_QUEUE_EMPTY = -32, + NE6XVF_ERR_INVALID_ALIGNMENT = -33, + NE6XVF_ERR_FLUSHED_QUEUE = -34, + NE6XVF_ERR_INVALID_PUSH_PAGE_INDEX = -35, + NE6XVF_ERR_INVALID_IMM_DATA_SIZE = -36, + NE6XVF_ERR_TIMEOUT = -37, + NE6XVF_ERR_OPCODE_MISMATCH = -38, + NE6XVF_ERR_CQP_COMPL_ERROR = -39, + NE6XVF_ERR_INVALID_VF_ID = -40, + NE6XVF_ERR_INVALID_HMCFN_ID = -41, + NE6XVF_ERR_BACKING_PAGE_ERROR = -42, + NE6XVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + NE6XVF_ERR_INVALID_PBLE_INDEX = -44, + NE6XVF_ERR_INVALID_SD_INDEX = -45, + NE6XVF_ERR_INVALID_PAGE_DESC_INDEX = -46, + NE6XVF_ERR_INVALID_SD_TYPE = -47, + NE6XVF_ERR_MEMCPY_FAILED = -48, + NE6XVF_ERR_INVALID_HMC_OBJ_INDEX = -49, + NE6XVF_ERR_INVALID_HMC_OBJ_COUNT = -50, + NE6XVF_ERR_INVALID_SRQ_ARM_LIMIT = -51, + NE6XVF_ERR_SRQ_ENABLED = -52, + NE6XVF_ERR_ADMIN_QUEUE_ERROR = -53, + NE6XVF_ERR_ADMIN_QUEUE_TIMEOUT = -54, + NE6XVF_ERR_BUF_TOO_SHORT = -55, + NE6XVF_ERR_ADMIN_QUEUE_FULL = -56, + NE6XVF_ERR_ADMIN_QUEUE_NO_WORK = -57, + NE6XVF_ERR_BAD_IWARP_CQE = -58, + NE6XVF_ERR_NVM_BLANK_MODE = -59, + NE6XVF_ERR_NOT_IMPLEMENTED = -60, + NE6XVF_ERR_PE_DOORBELL_NOT_ENABLED = -61, + NE6XVF_ERR_DIAG_TEST_FAILED = -62, + NE6XVF_ERR_NOT_READY = -63, + NE6XVF_NOT_SUPPORTED = -64, + NE6XVF_ERR_FIRMWARE_API_VERSION = -65, + NE6XVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, +}; + +static inline const char *ne6xvf_state_str(enum ne6xvf_state_t state) +{ + switch (state) { + case __NE6XVF_STARTUP: + return "__NE6XVF_STARTUP"; + case __NE6XVF_REMOVE: + return "__NE6XVF_REMOVE"; + case __NE6XVF_INIT_GET_RESOURCES: + return "__NE6XVF_INIT_GET_RESOURCES"; + case __NE6XVF_INIT_EXTENDED_CAPS: + return "__NE6XVF_INIT_EXTENDED_CAPS"; + case __NE6XVF_INIT_CONFIG_ADAPTER: + return "__NE6XVF_INIT_CONFIG_ADAPTER"; + case __NE6XVF_INIT_SW: + return "__NE6XVF_INIT_SW"; + case __NE6XVF_INIT_FAILED: + return "__NE6XVF_INIT_FAILED"; + case __NE6XVF_RESETTING: + return "__NE6XVF_RESETTING"; + case __NE6XVF_COMM_FAILED: + return "__NE6XVF_COMM_FAILED"; + case __NE6XVF_DOWN: + return "__NE6XVF_DOWN"; + case __NE6XVF_DOWN_PENDING: + return "__NE6XVF_DOWN_PENDING"; + case 
__NE6XVF_TESTING: + return "__NE6XVF_TESTING"; + case __NE6XVF_RUNNING: + return "__NE6XVF_RUNNING"; + default: + return "__NE6XVF_UNKNOWN_STATE"; + } +} + +static inline void ne6xvf_change_state(struct ne6xvf_adapter *adapter, enum ne6xvf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } +} + +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + +int ne6xvf_send_api_ver(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init); +int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_feature_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter); +int ne6xvf_request_reset(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); +void ne6xvf_set_ethtool_ops(struct net_device *netdev); +void ne6xvf_request_stats(struct ne6xvf_adapter *adapter); +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e, + u16 *pending); +void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode, + enum ne6xvf_status v_retval, u8 *msg, u16 msglen); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_request_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter); +void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter); +void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter); + +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter); +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num); +void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter); +int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event, + enum virtchnl_ops op_to_poll); +int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter); +void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter); +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len); +void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter); +void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter); +int ne6xvf_close(struct net_device *netdev); +int ne6xvf_open(struct net_device *netdev); +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter); +void ne6xvf_tail_update(struct ne6x_ring *ring, int val); +int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter); + +#endif /* _NE6XVF_H */ diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c new file mode 100644 index 000000000000..66f589020e23 --- /dev/null +++ 
b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include + +#include "ne6xvf.h" + +static struct dentry *ne6xvf_dbg_root; + +void ne6xvf_showqueue(struct ne6xvf_adapter *pf) +{ + struct ne6x_ring *ring; + u64 head, tail, oft; + int i; + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->rx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_OFST)); + dev_info(&pf->pdev->dev, "----RX: Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_alloc, + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->tx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_OFST)); + dev_info(&pf->pdev->dev, "----TX: Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->cq_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER)); + dev_info(&pf->pdev->dev, "----CQ: Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); +} + +void ne6xvf_showring(struct ne6xvf_adapter *pf) +{ + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + union ne6x_rx_desc *rx_desc; + struct ne6x_ring *ring; + int j, k; + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->rx_rings[j]; + + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* empty descriptor, skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], m_len[%d], s_len[%d], s_addr[0x%llx], m_addr[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, + rx_desc->w.vp, + rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, + rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, + rx_desc->wb.u.val, + rx_desc->wb.vp, + rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->tx_rings[j]; + + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_sop_addr) + /* empty descriptor, skp */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, + tx_desc->u.val, + tx_desc->vp, 
+				 tx_desc->event_trigger,
+				 tx_desc->chain,
+				 tx_desc->transmit_type,
+				 tx_desc->sop_valid,
+				 tx_desc->eop_valid,
+				 tx_desc->tso,
+				 tx_desc->l3_csum,
+				 tx_desc->l3_ofst,
+				 tx_desc->l4_csum,
+				 tx_desc->l4_ofst,
+				 tx_desc->pld_ofst,
+				 tx_desc->mop_cnt,
+				 tx_desc->sop_cnt,
+				 tx_desc->mss,
+				 tx_desc->buffer_mop_addr,
+				 tx_desc->buffer_sop_addr);
+		}
+	}
+
+	for (j = 0; j < pf->num_active_queues; j++) {
+		ring = &pf->cq_rings[j];
+
+		for (k = 0; k < ring->count; k++) {
+			cq_desc = NE6X_CQ_DESC(ring, k);
+			if (!cq_desc->num)
+				/* empty descriptor, skip */
+				continue;
+
+			dev_info(&pf->pdev->dev, "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n",
+				 k,
+				 ring->reg_idx,
+				 cq_desc->ctype,
+				 cq_desc->num);
+		}
+	}
+}
+
+const struct ne6xvf_dbg_cmd_wr deg_cmd_wr[] = {
+	{"queue", ne6xvf_showqueue},
+	{"ring", ne6xvf_showring},
+};
+
+/**
+ * ne6xvf_dbg_command_read - read for command datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ne6xvf_dbg_command_read(struct file *filp, char __user *buffer, size_t count,
+				       loff_t *ppos)
+{
+	return 0;
+}
+
+/**
+ * ne6xvf_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ne6xvf_dbg_command_write(struct file *filp, const char __user *buffer, size_t count,
+					loff_t *ppos)
+{
+	struct ne6xvf_adapter *pf = filp->private_data;
+	char *cmd_buf, *cmd_buf_tmp;
+	int bytes_not_copied;
+	int i, cnt;
+
+	/* don't allow partial writes */
+	if (*ppos != 0)
+		return 0;
+
+	/* don't cross maximal possible value */
+	if (count >= NCE_DEBUG_CHAR_LEN)
+		return -ENOSPC;
+
+	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+	if (!cmd_buf)
+		return -ENOMEM;
+
+	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+	if (bytes_not_copied) {
+		kfree(cmd_buf);
+		return -EFAULT;
+	}
+	cmd_buf[count] = '\0';
+
+	cmd_buf_tmp = strchr(cmd_buf, '\n');
+	if (cmd_buf_tmp) {
+		*cmd_buf_tmp = '\0';
+		count = cmd_buf_tmp - cmd_buf + 1;
+	}
+
+	if (strncmp(cmd_buf, "read", 4) == 0) {
+		u32 base_addr;
+		u32 offset_addr;
+		u64 value = 0;
+
+		cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr);
+		if (cnt != 2) {
+			dev_warn(&pf->pdev->dev, "read <base_addr> <offset_addr>\n");
+			goto command_write_done;
+		}
+		dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr,
+			 value);
+	} else if (strncmp(cmd_buf, "write", 5) == 0) {
+		u32 base_addr;
+		u32 offset_addr;
+		u64 value = 0;
+
+		cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value);
+		if (cnt != 3) {
+			dev_warn(&pf->pdev->dev, "write <base_addr> <offset_addr> <value>\n");
+			goto command_write_done;
+		}
+		dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr,
+			 value);
+	} else {
+		for (i = 0; i < ARRAY_SIZE(deg_cmd_wr); i++) {
+			if (strncmp(cmd_buf, deg_cmd_wr[i].command, count) == 0) {
+				deg_cmd_wr[i].command_proc(pf);
+				goto command_write_done;
+			}
+		}
+
+		dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+	}
+
+command_write_done:
+	kfree(cmd_buf);
+	cmd_buf = NULL;
+	return count;
+}
+
+static const struct file_operations ne6xvf_dbg_command_fops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = ne6xvf_dbg_command_read,
+	.write = ne6xvf_dbg_command_write,
+};
+
+/**
+ * ne6xvf_dbg_pf_init - setup the debugfs directory for the PF
+ * @pf: the PF that is starting up
+ **/
+void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf)
+{
+	
const struct device *dev = &pf->pdev->dev; + const char *name = pci_name(pf->pdev); + struct dentry *pfile; + + pf->ne6xvf_dbg_pf = debugfs_create_dir(name, ne6xvf_dbg_root); + if (!pf->ne6xvf_dbg_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->ne6xvf_dbg_pf, pf, + &ne6xvf_dbg_command_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); +} + +/** + * nce_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf) +{ + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); + pf->ne6xvf_dbg_pf = NULL; +} + +/** + * nce_dbg_init - start up debugfs for the driver + **/ +void ne6xvf_dbg_init(void) +{ + ne6xvf_dbg_root = debugfs_create_dir(ne6xvf_driver_name, NULL); + if (!ne6xvf_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * nce_dbg_exit - clean out the driver's debugfs entries + **/ +void ne6xvf_dbg_exit(void) +{ + debugfs_remove_recursive(ne6xvf_dbg_root); + ne6xvf_dbg_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c new file mode 100644 index 000000000000..3fbab2d87066 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_ethtool_stats.h" +#include "ne6xvf_txrx.h" + +static const char ne6xvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Eeprom test (offline)", + "Interrupt test (offline)", + "Link test (on/offline)" +}; + +#define NE6XVF_TEST_LEN (sizeof(ne6xvf_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6xvf_q_stats_len(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6xvf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. 
+ */ +#define NE6XVF_NETDEV_STAT(_net_stat) NE6XVF_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* per-queue ring statistics */ +#define NE6XVF_QUEUE_STAT(_name, _stat) NE6XVF_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6xvf_stats ne6xvf_gstrings_tx_queue_stats[] = { + NE6XVF_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6XVF_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6XVF_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6XVF_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_ecc_err", tx_stats.tx_ecc_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_rx_queue_stats[] = { + NE6XVF_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_pg_err", rx_stats.alloc_page_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6XVF_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6XVF_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_cq_queue_stats[] = { + NE6XVF_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + NE6XVF_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6XVF_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6XVF_PORT_MAC_STAT(_name, _stat) NE6XVF_STAT(struct ne6xvf_vsi, _name, _stat) + +#define NE6XVF_ALL_STATS_LEN(n) (ne6xvf_q_stats_len(n)) + +#define ne6xvf_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ + ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_mode) + +static void ne6xvf_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + switch (adapter->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + adapter->link_speed); + break; + } + ks->base.duplex = DUPLEX_FULL; +} + +/** + * ne6xvf_get_settings_link_down - Get the Link settings when link is down + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + */ +static void ne6xvf_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +/** + * ne6xvf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * 
Reports speed/duplex settings based on media_type + */ +static int ne6xvf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ks) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + + ks->base.port = PORT_NONE; + if (adapter->link_up) { + /* Set flow control settings */ + ne6xvf_get_settings_link_up(ks, netdev); + } else { + ne6xvf_get_settings_link_down(ks, netdev); + } + + return 0; +} + +/** + * ne6xvf_set_link_ksettings - Set Speed and Duplex + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Set speed/duplex per media_types advertised/forced + */ +static int ne6xvf_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + return -EOPNOTSUPP; +} + +static void __ne6xvf_add_stat_strings(u8 **p, const struct ne6xvf_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6xvf_add_stat_strings(p, stats, ...) \ + __ne6xvf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6xvf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + unsigned int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_tx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_rx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_cq_queue_stats, i); + } +} + +static void ne6xvf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6xvf_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6xvf_gstrings_test, NE6XVF_TEST_LEN * ETH_GSTRING_LEN); + default: + break; + } +} + +static int ne6xvf_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + /* The number (and order) of strings reported *must* remain + * constant for a given netdevice. This function must not + * report a different number based on run time parameters + * (such as the number of queues in use, or the setting of + * a private ethtool flag). This is due to the nature of the + * ethtool stats API. + * + * Userspace programs such as ethtool must make 3 separate + * ioctl requests, one for size, one for the strings, and + * finally one for the stats. Since these cross into + * userspace, changes to the number or size could result in + * undefined memory access or incorrect string<->value + * correlations for statistics. + * + * Even if it appears to be safe, changes to the size or + * order of strings will suffer from race conditions and are + * not safe. 
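+	 * As an illustration of that sequence (standard ethtool ioctls, not
+	 * anything driver-specific): a single "ethtool -S <dev>" invocation
+	 * issues
+	 *   ETHTOOL_GSSET_INFO -> ne6xvf_get_sset_count()    (how many stats)
+	 *   ETHTOOL_GSTRINGS   -> ne6xvf_get_strings()       (their names)
+	 *   ETHTOOL_GSTATS     -> ne6xvf_get_ethtool_stats() (their values)
+	 * and silently mis-pairs names and values if the counts ever differ.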
+ */ + return NE6XVF_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6XVF_TEST_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ne6xvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + + ne6xvf_update_pf_stats(adapter); + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adapter->num_active_queues; j++) { + tx_ring = &adapter->tx_rings[j]; + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = &adapter->rx_rings[j]; + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = &adapter->cq_rings[j]; + if (cq_ring) { + data[i++] = cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + rcu_read_unlock(); +} + +static void ne6xvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, ne6xvf_driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, ne6xvf_driver_version, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); +} + +static void ne6xvf_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) {} + +static void ne6xvf_self_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, sizeof(*data) * NE6XVF_TEST_LEN); +} + +static int ne6xvf_get_regs_len(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_desc_count; + ring->tx_pending = adapter->tx_desc_count; + ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS; + 
ring->rx_jumbo_pending = 0; +} + +static int ne6xvf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count, new_cq_count; + int err; + + if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->tx_pending < NE6X_MIN_NUM_DESCRIPTORS || + ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->rx_pending < NE6X_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS, + NE6X_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_cq_count = new_rx_count + new_rx_count; + + if (new_tx_count == adapter->tx_desc_count && new_rx_count == adapter->rx_desc_count) + return 0; + + if (!netif_running(adapter->netdev)) { + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + return 0; + } + + err = ne6xvf_close(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to close vf\n"); + return err; + } + netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to [%d-%d]\n", + adapter->tx_rings[0].count, adapter->rx_rings[0].count, new_tx_count, + new_rx_count); + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + + err = ne6xvf_open(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to open vf\n"); + return err; + } + + return 0; +} + +/** + * ne6xvf_get_pauseparam - Get Flow Control status + * @netdev: netdevice structure + * @pause: buffer to return pause parameters + * + * Return tx/rx-pause status + **/ +static void ne6xvf_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + pause->autoneg = 0; + pause->rx_pause = 0; + pause->tx_pause = 0; +} + +/** + * ne6xvf_get_coalesce - get a netdev's coalesce settings + * @netdev: the netdev to check + * @ec: ethtool coalesce data structure + * + **/ +static int ne6xvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + ec->tx_max_coalesced_frames_irq = 256; + ec->rx_max_coalesced_frames_irq = 256; + ec->use_adaptive_rx_coalesce = 0; + ec->use_adaptive_tx_coalesce = 0; + ec->rx_coalesce_usecs = 0; + ec->tx_coalesce_usecs = 0; + ec->rx_coalesce_usecs_high = 0; + ec->tx_coalesce_usecs_high = 0; + + return 0; +} + +static int ne6xvf_get_eeprom_len(struct net_device *netdev) +{ + return 0x64; +} + +static int ne6xvf_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + int blink_freq = 2; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return blink_freq; + case ETHTOOL_ID_ON: + break; + case ETHTOOL_ID_OFF: + break; + case ETHTOOL_ID_INACTIVE: + break; + default: + break; + } + + return 0; +} + +static int ne6xvf_nway_reset(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + data[NE6XVF_ETH_TEST_LINK] = 0; + + /* Offline only tests, not run in online; pass by default */ + data[NE6XVF_ETH_TEST_REG] = 
0; + data[NE6XVF_ETH_TEST_EEPROM] = 0; + data[NE6XVF_ETH_TEST_INTR] = 0; +} + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int ne6xvf_get_rss_hash_opts(struct ne6xvf_adapter *adapter, u64 flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ + data |= RXH_IP_SRC | RXH_IP_DST; + break; + } + + return data; +} + +static int ne6xvf_set_rss_hash_opts(struct ne6xvf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + u16 rss_flags = adapter->rss_info.hash_type; + + if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_TCP; + break; + case UDP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP; + break; + case UDP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP; + break; + default: + return -EINVAL; + } + + if (rss_flags == adapter->rss_info.hash_type) + return 0; + + adapter->rss_info.hash_type = rss_flags; + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + + return 0; +} + +/** + * ne6xvf_set_rxnfc - command to set Rx flow rules. 
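+ * (Hedged example of the dispatch below, assuming the standard ethtool
+ *  rx-flow-hash letters: "ethtool -N <dev> rx-flow-hash tcp4 sdfn" reaches
+ *  this handler as ETHTOOL_SRXFH with flow_type TCP_V4_FLOW and
+ *  data == RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which
+ *  ne6xvf_set_rss_hash_opts() accepts as L3_RSS_FLAGS | L4_RSS_FLAGS;
+ *  "... tcp4 sd" selects the L3-only hash instead.)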
+ * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns 0 for success and negative values for errors + */ +static int ne6xvf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + ret = ne6xvf_set_rss_hash_opts(adapter, info); + break; + default: + break; + } + + return ret; +} + +/** + * iavf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations + * + * Returns Success if the command is supported. + **/ +static int ne6xvf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_active_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + cmd->data = ne6xvf_get_rss_hash_opts(adapter, cmd->flow_type); + break; + default: + break; + } + + return 0; +} + +/** + * ne6xvf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 ne6xvf_get_rxfh_key_size(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_info.hash_key_size; +} + +/** + * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 ne6xvf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_info.ind_table_size; +} + +/** + * ne6xvf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function in use + * + * Reads the indirection table directly from the hardware. Always returns 0. + **/ +static int ne6xvf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + u16 i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (key) + memcpy(key, adapter->rss_info.hash_key, adapter->rss_info.hash_key_size); + + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + indir[i] = (u32)adapter->rss_info.ind_table[i]; + } + + return 0; +} + +/** + * ne6xvf_set_rxfh - set the Rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function + * + * Returns -EINVAL if the table specifies an invalid queue ID, otherwise + * returns 0 after programming the table. 
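+ * Illustration (assumed userspace flow, not a requirement of this code):
+ * "ethtool -X <dev> equal 4" hands in an indirection table whose
+ * ind_table_size entries cycle through queues 0..3; they are copied into
+ * adapter->rss_info.ind_table below and NE6XVF_FLAG_AQ_CONFIGURE_RSS is
+ * raised so the new table can be pushed to the PF afterwards.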
+ */ +static int ne6xvf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!key && !indir) + return 0; + + if (key) + memcpy(&adapter->rss_info.hash_key[0], key, adapter->rss_info.hash_key_size); + + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + adapter->rss_info.ind_table[i] = (u8)(indir[i]); + } + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + + return 0; +} + +/** + * iavf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @ch: channel information structure + * + * For the purposes of our device, we only use combined channels, i.e. a tx/rx + * queue pair. Report one extra channel to match our "other" MSI-X vector. + **/ +static void ne6xvf_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = adapter->max_queues; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = adapter->num_active_queues; +} + +/** + * ne6xvf_set_channels: set the new channel count + * @netdev: network interface device structure + * @ch: channel information structure + * + * Negotiate a new number of channels with the PF then do a reset. During + * reset we'll realloc queues and fix the RSS table. Returns 0 on success, + * negative on failure. + **/ +static int ne6xvf_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (!channels->combined_count || channels->rx_count || channels->tx_count || + channels->combined_count > adapter->vf_res->num_queue_pairs) + return -EINVAL; + + if (channels->rx_count == adapter->num_active_queues) { + /* nothing to do */ + netdev_info(netdev, "channel not change, nothing to do!\n"); + return 0; + } + + /* set for the next time the netdev is started */ + if (!netif_running(adapter->netdev)) { + adapter->num_active_queues = channels->combined_count; + + netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues); + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues); + + ne6xvf_fill_rss_lut(adapter); + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + + return 0; + } + + err = ne6xvf_close(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to close vf\n"); + return err; + } + + adapter->num_active_queues = channels->combined_count; + + netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues); + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues); + + ne6xvf_fill_rss_lut(adapter); + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + err = ne6xvf_open(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to open vf\n"); + return err; + } + + return 0; +} + +static const struct ethtool_ops ne6xvf_ethtool_ops = { + .get_link_ksettings = ne6xvf_get_link_ksettings, + .set_link_ksettings = ne6xvf_set_link_ksettings, + .get_strings = ne6xvf_get_strings, + .get_sset_count = ne6xvf_get_sset_count, + 
.get_ethtool_stats = ne6xvf_get_ethtool_stats, + .get_drvinfo = ne6xvf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_regs = ne6xvf_get_regs, + .get_regs_len = ne6xvf_get_regs_len, + .self_test = ne6xvf_self_test, + .get_ringparam = ne6xvf_get_ringparam, + .set_ringparam = ne6xvf_set_ringparam, + .get_pauseparam = ne6xvf_get_pauseparam, + .get_coalesce = ne6xvf_get_coalesce, + .get_eeprom_len = ne6xvf_get_eeprom_len, + .get_rxnfc = ne6xvf_get_rxnfc, + .set_rxnfc = ne6xvf_set_rxnfc, + .get_rxfh_key_size = ne6xvf_get_rxfh_key_size, + .get_rxfh_indir_size = ne6xvf_get_rxfh_indir_size, + .get_rxfh = ne6xvf_get_rxfh, + .set_rxfh = ne6xvf_set_rxfh, + .get_channels = ne6xvf_get_channels, + .set_channels = ne6xvf_set_channels, + .set_phys_id = ne6xvf_set_phys_id, + .nway_reset = ne6xvf_nway_reset, + .self_test = ne6xvf_diag_test, +}; + +void ne6xvf_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &ne6xvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h new file mode 100644 index 000000000000..300a90b6af55 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_ETHTOOL_H +#define _NE6XVF_ETHTOOL_H + +#include "ne6xvf.h" + +#define NE6XVF_STAT(_type, _name, _stat) \ +{ \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +enum ne6xvf_ethtool_test_id { + NE6XVF_ETH_TEST_REG = 0, + NE6XVF_ETH_TEST_EEPROM, + NE6XVF_ETH_TEST_INTR, + NE6XVF_ETH_TEST_LINK, +}; + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c new file mode 100644 index 000000000000..1de2173d7675 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c @@ -0,0 +1,3303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include +#include + +#include "ne6xvf.h" +#include "ne6xvf_osdep.h" +#include "ne6xvf_virtchnl.h" +#include "ne6xvf_txrx.h" +#include "version.h" + +#define CREATE_TRACE_POINTS +#include "ne6x_trace.h" + +#define SUMMARY \ + "Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function Linux Driver" +#define COPYRIGHT "Copyright (c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd." 
+ +char ne6xvf_driver_name[] = "ncevf"; +static const char ne6xvf_driver_string[] = SUMMARY; + +const char ne6xvf_driver_version[] = VERSION; +static const char ne6xvf_copyright[] = COPYRIGHT; + +static const struct pci_device_id ne6xvf_pci_tbl[] = { + {PCI_VDEVICE(BZWX, 0x501a), 0}, + {PCI_VDEVICE(BZWX, 0x601a), 0}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ne6xvf_pci_tbl); + +MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., "); +MODULE_DESCRIPTION(SUMMARY); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); + +static const struct net_device_ops ne6xvf_netdev_ops; +struct workqueue_struct *ne6xvf_wq; +static void ne6xvf_sync_features(struct net_device *netdev); + +struct ne6xvf_adapter *ne6xvf_pdev_to_adapter(struct pci_dev *pdev) +{ + return netdev_priv(pci_get_drvdata(pdev)); +} + +void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter) +{ + adapter->flags |= NE6XVF_FLAG_RESET_NEEDED; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +static void ne6xvf_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + adapter->tx_timeout_count++; + ne6xvf_schedule_reset(adapter); +} + +/** + * nce_get_vsi_stats_struct - Get System Network Statistics + * @vsi: the VSI we care about + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. + **/ + +struct net_device_stats *nce_get_vsi_stats_struct(struct ne6xvf_adapter *adapter) +{ + if (adapter->netdev) + return &adapter->netdev->stats; + else + return &adapter->net_stats; +} + +/** + * nce_update_pf_stats - Update PF port stats counters + * @pf: PF whose stats needs to be updated + */ +void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter) +{ + struct net_device_stats *ns; /* netdev stats */ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + u64 bytes, packets; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u16 i; + + if (test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) + return; + + ns = nce_get_vsi_stats_struct(adapter); + + rx_p = 0; + rx_b = 0; + tx_p = 0; + tx_b = 0; + + rcu_read_lock(); + for (i = 0; i < adapter->num_active_queues; i++) { + /* locate Tx ring */ + tx_ring = &adapter->tx_rings[i]; + + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + + tx_b += bytes; + tx_p += packets; + + rx_ring = &adapter->rx_rings[i]; + + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + rx_b += bytes; + rx_p += packets; + } + rcu_read_unlock(); + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + + adapter->net_stats.rx_packets = rx_p; + adapter->net_stats.tx_packets = rx_b; + adapter->net_stats.rx_bytes = rx_b; + adapter->net_stats.tx_bytes = tx_b; +} + +bool ne6xvf_is_remove_in_progress(struct ne6xvf_adapter *adapter) +{ + return test_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section); +} + +static void ne6xvf_sdk_task(struct work_struct *work) +{ + struct ne6xvf_adapter *adapter = container_of(work, struct ne6xvf_adapter, sdk_task); + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_arq_event_info event; + enum ne6xvf_status ret, v_ret; + enum virtchnl_ops v_op; + u16 pending = 1u; + + if (ne6xvf_is_remove_in_progress(adapter)) + return; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + goto out; + + event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE; + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + goto out; + + do { + ret = 
ne6xvf_clean_arq_element(hw, &event, &pending); + v_op = (enum virtchnl_ops)le32_to_cpu(event.snap.type); + v_ret = (enum ne6xvf_status)le32_to_cpu(event.snap.state); + + if (ret || !v_op) + break; /* No event to process or error cleaning ARQ */ + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + ne6xvf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (pending != 0) + memset(event.msg_buf, 0, NE6XVF_MAX_AQ_BUF_SIZE); + } while (pending); + + if ((adapter->flags & (NE6XVF_FLAG_RESET_PENDING | NE6XVF_FLAG_RESET_NEEDED)) || + adapter->state == __NE6XVF_RESETTING) + goto freedom; + +freedom: + kfree(event.msg_buf); + +out: + return; +} + +static int ne6xvf_check_reset_complete(struct ne6xvf_hw *hw) +{ + u64 rstat; + int i; + + for (i = 0; i < NE6XVF_RESET_WAIT_COMPLETE_COUNT; i++) { + rstat = rd64(hw, NE6XVF_REG_ADDR(0, NE6X_VP_RELOAD)); + if (rstat) + return 0; + + usleep_range(10, 20); + } + + return 0; +} + +int ne6xvf_init_sdk_mbx(struct ne6xvf_hw *hw) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + union u_ne6x_mbx_snap_buffer_data usnap; + u64 val; + + if (hw->mbx.init_flag) + return -1; + + hw->mbx.sq_data.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + hw->mbx.sq_data.type = VIRTCHNL_OP_UNKNOWN; + hw->mbx.init_flag = 0x1; + + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + if (val & 0x2) { + usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA)); + mbx_buffer.snap.state = usnap.snap.state; + mbx_buffer.snap.type = usnap.snap.type; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1); + } + + usleep_range(10, 20); + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + + if (val & 0x1) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1); + + return 0; +} + +static void ne6xvf_startup(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ne6xvf_hw *hw = &adapter->hw; + int ret; + + WARN_ON(adapter->state != __NE6XVF_STARTUP); + + adapter->flags &= ~NE6XVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + + ret = ne6xvf_check_reset_complete(hw); + if (ret) { + dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", ret); + goto err; + } + + ret = ne6xvf_init_sdk_mbx(hw); + if (ret) { + dev_err(&pdev->dev, "Failed to init SDK (%d)\n", ret); + goto err; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_GET_RESOURCES); + + return; + +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES + * @adapter: board private structure + */ +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter) +{ + int i, num_req_queues = adapter->num_req_queues; + + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (num_req_queues && num_req_queues > adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. Need a reset to see if we can't + * get back to a working state. 
+ */ + dev_err(&adapter->pdev->dev, "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, adapter->vsi_res->num_queue_pairs); + adapter->flags |= NE6XVF_FLAG_REINIT_MSIX_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + ne6xvf_schedule_reset(adapter); + + return -EAGAIN; + } + adapter->num_req_queues = 0; + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + return 0; +} + +/** + * ne6xvf_init_get_resources - third step of driver startup + * @adapter: board private structure + * + * Function process __NE6XVF_INIT_GET_RESOURCES driver state and + * finishes driver initialization procedure. + * When success the state is changed to __NE6XVF_DOWN + * when fails the state is changed to __NE6XVF_INIT_FAILED + **/ +static void ne6xvf_init_get_resources(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int ret; + + WARN_ON(adapter->state != __NE6XVF_INIT_GET_RESOURCES); + + if (!adapter->vf_res) { + adapter->vf_res = kzalloc(struct_size(adapter->vf_res, vsi_res, 1), GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + + adapter->hw_feature = 0x00; + ret = ne6xvf_send_vf_config_msg(adapter, true); + if (ret) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", ret); + goto err; + } + + ret = ne6xvf_get_vf_config(adapter); + if (ret == NE6XVF_ERR_ADMIN_QUEUE_NO_WORK) { + ret = ne6xvf_send_vf_config_msg(adapter, true); + goto err_alloc; + } else if (ret == NE6XVF_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. + */ + dev_err(&pdev->dev, + "Unable to get VF config due to PF error condition, not retrying\n"); + return; + } + + if (ret) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", ret); + goto err_alloc; + } + + ret = ne6xvf_parse_vf_resource_msg(adapter); + if (ret) { + dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", ret); + goto err_alloc; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_EXTENDED_CAPS); + return; + +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_napi_disable_all - disable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void ne6xvf_napi_disable_all(struct ne6xvf_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors; + struct ne6x_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + napi_disable(&q_vector->napi); + } +} + +static void ne6xvf_free_queues(struct ne6xvf_adapter *adapter) +{ + if (!adapter->vsi_res) + return; + + adapter->num_active_queues = 0; + kfree(adapter->tg_rings); + adapter->tg_rings = NULL; + kfree(adapter->cq_rings); + adapter->cq_rings = NULL; + kfree(adapter->tx_rings); + adapter->tx_rings = NULL; + kfree(adapter->rx_rings); + adapter->rx_rings = NULL; +} + +/** + * ne6xvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
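+ * For reference, four parallel ring arrays are carved out below, one entry
+ * per queue pair: tx (send descriptors), rx (receive descriptors), cq
+ * (completion) and tg; entry i of each array shares queue_index i, so a
+ * queue pair always maps to the same slot in all four arrays.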
+ **/ +static int ne6xvf_alloc_queues(struct ne6xvf_adapter *adapter) +{ + int i, num_active_queues; + + /* If we're in reset reallocating queues we don't actually know yet for + * certain the PF gave us the number of queues we asked for but we'll + * assume it did. Once basic reset is finished we'll confirm once we + * start negotiating config with PF. + */ + if (adapter->num_req_queues) + num_active_queues = adapter->num_req_queues; + else + num_active_queues = min_t(int, adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); + + adapter->tg_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + adapter->cq_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + + adapter->tx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + + adapter->rx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + + for (i = 0; i < num_active_queues; i++) { + struct ne6x_ring *tg_ring; + struct ne6x_ring *cq_ring; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + + tg_ring = &adapter->tg_rings[i]; + tg_ring->queue_index = i; + tg_ring->netdev = adapter->netdev; + tg_ring->dev = pci_dev_to_dev(adapter->pdev); + tg_ring->adpt = adapter; + tg_ring->count = adapter->tx_desc_count; + + cq_ring = &adapter->cq_rings[i]; + cq_ring->queue_index = i; + cq_ring->netdev = adapter->netdev; + cq_ring->dev = pci_dev_to_dev(adapter->pdev); + cq_ring->adpt = adapter; + cq_ring->count = adapter->cq_desc_count; + + tx_ring = &adapter->tx_rings[i]; + tx_ring->queue_index = i; + tx_ring->netdev = adapter->netdev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->adpt = adapter; + tx_ring->count = adapter->tx_desc_count; + + rx_ring = &adapter->rx_rings[i]; + rx_ring->queue_index = i; + rx_ring->netdev = adapter->netdev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->adpt = adapter; + rx_ring->count = adapter->rx_desc_count; + } + + adapter->max_queues = num_active_queues; + adapter->num_active_queues = adapter->max_queues; + + return 0; + +err_out: + ne6xvf_free_queues(adapter); + return -ENOMEM; +} + +static void ne6xvf_irq_disable(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + if (!adapter->msix_entries) + return; + + for (i = 0; i < adapter->num_msix_vectors; i++) { + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), 0xffffffffffffffff); + synchronize_irq(adapter->msix_entries[i].vector); + } +} + +static void ne6xvf_free_traffic_irqs(struct ne6xvf_adapter *adapter) +{ + int vector, irq_num, q_vectors; + + if (!adapter->msix_entries) + return; + + q_vectors = adapter->num_active_queues; + + for (vector = 0; vector < q_vectors; vector++) { + irq_num = adapter->msix_entries[vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } +} + +static void ne6xvf_free_q_vectors(struct ne6xvf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + if (!adapter->q_vectors) + return; + + num_q_vectors = adapter->num_msix_vectors; + napi_vectors = adapter->num_active_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = &adapter->q_vectors[q_idx]; + + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + } + + kfree(adapter->q_vectors); + adapter->q_vectors = NULL; +} + +/** + * ne6xvf_disable_vf - disable a VF that failed to reset + 
* @adapter: private adapter structure + * + * Helper function to shut down the VF when a reset never finishes. + **/ +static void ne6xvf_disable_vf(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *fv, *fvtmp; + struct ne6xvf_mac_filter *f, *ftmp; + + /* reset never finished */ + adapter->flags |= NE6XVF_FLAG_PF_COMMS_FAILED; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) { + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + ne6xvf_irq_disable(adapter); + ne6xvf_napi_disable_all(adapter); + ne6xvf_free_traffic_irqs(adapter); + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* Delete all of the filters */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { + list_del(&fv->list); + kfree(fv); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + memset(adapter->vf_res, 0, struct_size(adapter->vf_res, vsi_res, 1)); + adapter->netdev->flags &= ~IFF_UP; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); +} + +/** + * ne6xvf_acquire_msix_vectors - Setup the MSIX capability + * @adapter: board private structure + * @vectors: number of vectors to request + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_acquire_msix_vectors(struct ne6xvf_adapter *adapter, int vectors) +{ + int v_actual; + + /* We'll want at least 3 (vector_threshold): + * 0) Other (Admin Queue and link, mostly) + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + * + * The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. + * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + v_actual = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1, vectors); + if (v_actual != vectors) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts: %d\n", v_actual); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + pci_disable_msi(adapter->pdev); + return v_actual; + } + + adapter->num_msix_vectors = v_actual; + + return 0; +} + +/** + * ne6xvf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. 
+ **/ +static int ne6xvf_set_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + int vector, v_budget; + int err = 0; + + if (!adapter->vsi_res) + return -EIO; + + v_budget = adapter->num_active_queues; + adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + dev_info(&adapter->pdev->dev, "v_budget:%d, adapter->vf_res->max_vectors: %d\n", v_budget, + adapter->vf_res->max_vectors); + err = ne6xvf_acquire_msix_vectors(adapter, v_budget); +out: + netif_set_real_num_rx_queues(adapter->netdev, v_budget); + netif_set_real_num_tx_queues(adapter->netdev, v_budget); + + return err; +} + +/** + * ne6xvf_fill_rss_lut - Fill the lut with default values + * @adapter: board private structure + **/ +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + adapter->rss_info.ind_table[i] = i % adapter->num_active_queues; +} + +/** + * ne6xvf_init_rss - Prepare for RSS + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_init_rss(struct ne6xvf_adapter *adapter) +{ + struct ne6x_rss_info *rss_info = &adapter->rss_info; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + ne6xvf_fill_rss_lut(adapter); + netdev_rss_key_fill((void *)&adapter->rss_info.hash_key[0], + adapter->rss_info.hash_key_size); + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + return 0; +} + +/** + * ne6xvf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. 
+ **/ +static int ne6xvf_alloc_q_vectors(struct ne6xvf_adapter *adapter) +{ + struct ne6x_q_vector *q_vector; + int q_idx, num_q_vectors; + + num_q_vectors = adapter->num_active_queues; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); + if (!adapter->q_vectors) + return -ENOMEM; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + q_vector->adpt = adapter; + q_vector->v_idx = q_idx; + q_vector->reg_idx = q_idx; + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + netif_napi_add(adapter->netdev, &q_vector->napi, ne6xvf_napi_poll); + } + + return 0; +} + +/** + * ne6xvf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int ne6xvf_init_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + int err; + + err = ne6xvf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + + rtnl_lock(); + err = ne6xvf_set_interrupt_capability(adapter); + rtnl_unlock(); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ne6xvf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); + + return 0; + +err_alloc_q_vectors: + ne6xvf_reset_interrupt_capability(adapter); +err_set_interrupt: + ne6xvf_free_queues(adapter); +err_alloc_queues: + return err; +} + +/** + * ne6xvf_map_vector_to_cq - associate irqs with complete queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_cq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *cq_ring = &adapter->cq_rings[r_idx]; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; +} + +/** + * ne6xvf_map_vector_to_rxq - associate irqs with rx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_rxq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *rx_ring = &adapter->rx_rings[r_idx]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +/** + * ne6xvf_map_vector_to_txq - associate irqs with tx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @t_idx: queue number + **/ +static void ne6xvf_map_vector_to_txq(struct ne6xvf_adapter *adapter, int v_idx, int t_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = &adapter->tx_rings[t_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + q_vector->num_ringpairs++; +} + +/** + * ne6xvf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. 
Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static void ne6xvf_map_rings_to_vectors(struct ne6xvf_adapter *adapter) +{ + int rings_remaining = adapter->num_active_queues; + int q_vectors; + int ridx; + + q_vectors = adapter->num_msix_vectors; + + for (ridx = 0; ridx < rings_remaining; ridx++) { + ne6xvf_map_vector_to_cq(adapter, ridx, ridx); + ne6xvf_map_vector_to_rxq(adapter, ridx, ridx); + ne6xvf_map_vector_to_txq(adapter, ridx, ridx); + } +} + +/** + * ne6xvf_setup_all_tg_resources - allocate all queues Tg resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_tg_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tg_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_tg_descriptors(&adapter->tg_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "tg Allocation for complete Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_cq_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_cq_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->cq_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_cq_descriptors(&adapter->cq_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for complete Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_tx_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tx_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_tx_descriptors(&adapter->tx_rings[i]); + err |= ne6x_setup_tx_sgl(&adapter->tx_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_rx_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->rx_rings[i].count = adapter->rx_desc_count; + err = ne6x_setup_rx_descriptors(&adapter->rx_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for Rx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t ne6xvf_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt; + u64 val; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring) + return IRQ_HANDLED; + + napi_schedule_irqoff(&q_vector->napi); + val = rd64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK)); + val |= 1ULL << NE6X_VP_CQ_INTSHIFT; + wr64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK), val); + + return IRQ_HANDLED; +} + +/** + * ne6xvf_irq_affinity_notify - Callback for affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void ne6xvf_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector; + + q_vector = container_of(notify, struct ne6x_q_vector, affinity_notify); + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * ne6xvf_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. + **/ +static void ne6xvf_irq_affinity_release(struct kref *ref) {} + +/** + * ne6xvf_request_traffic_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * @basename: device basename + * + * Allocates MSI-X vectors for tx and rx handling, and requests + * interrupts from the kernel. 
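+ * The vectors requested here appear in /proc/interrupts under names of the
+ * form "ne6xvf-<netdev>-TxRx-<n>" (e.g. "ne6xvf-eth0-TxRx-0" for a
+ * hypothetical eth0), since every mapped q_vector in this driver carries
+ * both a tx and an rx ring.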
+ **/ +static int ne6xvf_request_traffic_irqs(struct ne6xvf_adapter *adapter, char *basename) +{ + unsigned int rx_int_idx = 0, tx_int_idx = 0; + unsigned int vector, q_vectors; + int irq_num, err; + int cpu; + + ne6xvf_irq_disable(adapter); + /* Decrement for Other and TCP Timer vectors */ + q_vectors = adapter->num_active_queues; + + for (vector = 0; vector < q_vectors; vector++) { + struct ne6x_q_vector *q_vector = &adapter->q_vectors[vector]; + + irq_num = adapter->msix_entries[vector].vector; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), "ne6xvf-%s-TxRx-%u", + basename, rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "ne6xvf-%s-rx-%u", basename, + rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name), + "ne6xvf-%s-tx-%u", basename, + tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + + err = request_irq(irq_num, ne6xvf_msix_clean_rings, 0, q_vector->name, q_vector); + if (err) { + dev_info(&adapter->pdev->dev, "Request_irq failed, error: %d\n", err); + goto free_queue_irqs; + } + + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = ne6xvf_irq_affinity_notify; + q_vector->affinity_notify.release = ne6xvf_irq_affinity_release; + irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); + + /* Spread the IRQ affinity hints across online CPUs. Note that + * get_cpu_mask returns a mask with a permanent lifetime so + * it's safe to use as a hint for irq_set_affinity_hint. + */ + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); + } + + return 0; + +free_queue_irqs: + while (vector) { + vector--; + irq_num = adapter->msix_entries[vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } + + return err; +} + +/** + * ne6xvf_configure_queues + * @adapter: adapter structure + * + * Request that the PF set up our (previously allocated) queues. + **/ +void ne6xvf_configure_queues(struct ne6xvf_adapter *adapter) +{ + unsigned int rx_buf_len = NE6X_RXBUFFER_2048; + struct ne6xvf_hw *hw = &adapter->hw; + union ne6x_sq_base_addr sq_base_addr; + union ne6x_rq_base_addr rq_base_addr; + union ne6x_rq_block_cfg rq_block_cfg; + union ne6x_cq_base_addr cq_base_addr; + union ne6x_cq_cfg cq_cfg; + union ne6x_sq_cfg sq_cfg; + union ne6x_rq_cfg rc_cfg; + int i; + + /* Legacy Rx will always default to a 2048 buffer size. 
*/ +#if (PAGE_SIZE < 8192) + if (!(adapter->flags & NE6XVF_FLAG_LEGACY_RX)) + /* For jumbo frames on systems with 4K pages we have to use + * an order 1 page, so we might as well increase the size + * of our Rx buffer to make better use of the available space + */ + rx_buf_len = NE6X_RXBUFFER_4096; +#endif + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + for (i = 0; i < adapter->num_active_queues; i++) { + /* cq */ + /* cache tail for quicker writes, and clear the reg before use */ + adapter->cq_rings[i].tail = (u64 __iomem *)(hw->hw_addr0 + NE6XVF_QC_TAIL1(i)); + adapter->cq_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + cq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = adapter->cq_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR), cq_base_addr.val); + + cq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = adapter->cq_rings[i].count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG), cq_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER), 0x0); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER), 0x0); + + /* tx */ + /* cache tail off for easier writes later */ + adapter->tx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QTX_TAIL1(i)); + adapter->tx_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + sq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = adapter->tx_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR), sq_base_addr.val); + + sq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = adapter->tx_rings[i].count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG), sq_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER), 0x0); + + /* rx */ + /* cache tail for quicker writes, and clear the reg before use */ + adapter->rx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QRX_TAIL1(i)); + adapter->rx_rings[i].rx_buf_len = rx_buf_len; + adapter->rx_rings[i].reg_idx = hw->dev_caps.base_queue + i; + + rq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = adapter->rx_rings[i].dma; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR), rq_base_addr.val); + + rq_block_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = adapter->rx_rings[i].rx_buf_len; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val); + + rc_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = adapter->rx_rings[i].count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG), rc_cfg.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER), 0x0); + } + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x0); + + usleep_range(100, 
120); +} + +/** + * ne6xvf_configure - set up transmit and receive data structures + * @adapter: board private structure + **/ +static void ne6xvf_configure(struct ne6xvf_adapter *adapter) +{ + int i; + + ne6xvf_configure_queues(adapter); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + + for (i = 0; i < adapter->num_active_queues; i++) { + struct ne6x_ring *ring = &adapter->rx_rings[i]; + + ne6x_alloc_rx_buffers(ring, NE6X_DESC_UNUSED(ring)); + usleep_range(1000, 2000); + } +} + +/** + * ne6xvf_napi_enable_all - enable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void ne6xvf_napi_enable_all(struct ne6xvf_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors; + struct ne6x_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + struct napi_struct *napi; + + q_vector = &adapter->q_vectors[q_idx]; + napi = &q_vector->napi; + napi_enable(napi); + } +} + +/** + * ne6xvf_up_complete - Finish the last steps of bringing up a connection + * @adapter: board private structure + * + * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock. + **/ +static void ne6xvf_up_complete(struct ne6xvf_adapter *adapter) +{ + ne6xvf_change_state(adapter, __NE6XVF_RUNNING); + clear_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_napi_enable_all(adapter); + + adapter->aq_required |= NE6XVF_FLAG_AQ_ENABLE_QUEUES; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +/** + * ne6xvf_reinit_interrupt_scheme - Reallocate queues and vectors + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_reinit_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + + err = ne6xvf_init_interrupt_scheme(adapter); + if (err) + goto err; + + netif_tx_stop_all_queues(netdev); + + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_map_rings_to_vectors(adapter); +err: + return err; +} + +static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter); + +/** + * ne6xvf_handle_reset - Handle hardware reset + * @adapter: pointer to ne6xvf_adapter + * + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. + * + * This function is called in the __NE6XVF_RESETTING driver state. If a reset + * is detected and completes, the driver state changed to __NE6XVF_RUNNING or + * __NE6XVF_DOWN, else driver state will remain in __NE6XVF_RESETTING. + * + * The function is called with the NE6XVF_FLAG_RESET_PENDING flag set and it is + * cleared when a reset is detected and completes. + **/ +static void ne6xvf_handle_reset(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_hw *hw = &adapter->hw; + bool running; + int err, i; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. 
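+	 * Instead the last recorded driver state is checked, and Tx/Rx
+	 * resources are only restored further down when that state was
+	 * __NE6XVF_RUNNING.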
+ */ + running = (adapter->last_state == __NE6XVF_RUNNING); + + if (running) { + netdev->flags &= ~IFF_UP; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + adapter->link_up = false; + ne6xvf_napi_disable_all(adapter); + } + + pci_set_master(adapter->pdev); + pci_restore_msi_state(adapter->pdev); + + ne6xvf_irq_disable(adapter); + + for (i = 0; i < adapter->num_msix_vectors; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + /* free the Tx/Rx rings and descriptors, might be better to just + * re-use them sometime in the future + */ + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + + /* Set the queues_disabled flag when VF is going through reset + * to avoid a race condition especially for ADQ i.e. when a VF ADQ is + * configured, PF resets the VF to allocate ADQ resources. When this + * happens there's a possibility to hit a condition where VF is in + * running state but the queues haven't been enabled yet. So wait for + * virtchnl success message for enable queues and then unset this flag. + * Don't allow the link to come back up until that happens. + */ + adapter->flags |= NE6XVF_FLAG_QUEUES_DISABLED; + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required = 0; + + err = ne6xvf_reinit_interrupt_scheme(adapter); + if (err) + goto reset_err; + + adapter->aq_required |= NE6XVF_FLAG_AQ_GET_CONFIG; + adapter->aq_required |= NE6XVF_FLAG_AQ_MAP_VECTORS; + + /* We were running when the reset started, so we need + * to restore some state here. + */ + if (running) { + err = ne6xvf_setup_all_tg_resources(adapter); + if (err) + goto reset_err; + + err = ne6xvf_setup_all_cq_resources(adapter); + if (err) + goto reset_err; + + /* allocate transmit descriptors */ + err = ne6xvf_setup_all_tx_resources(adapter); + if (err) + goto reset_err; + + /* allocate receive descriptors */ + err = ne6xvf_setup_all_rx_resources(adapter); + if (err) + goto reset_err; + + if ((adapter->flags & NE6XVF_FLAG_REINIT_MSIX_NEEDED) || + (adapter->flags & NE6XVF_FLAG_REINIT_ITR_NEEDED)) { + err = ne6xvf_request_traffic_irqs(adapter, netdev->name); + if (err) + goto reset_err; + + adapter->flags &= ~NE6XVF_FLAG_REINIT_MSIX_NEEDED; + } + + ne6xvf_configure(adapter); + + /* ne6xvf_up_complete() will switch device back + * to __NE6XVF_RUNNING + */ + ne6xvf_up_complete(adapter); + + ne6xvf_irq_enable(adapter, true); + + ne6xvf_get_port_link_status(adapter); + + netdev->flags |= IFF_UP; + } else { + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + } + + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + + return; + +reset_err: + if (running) { + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + ne6xvf_free_traffic_irqs(adapter); + netdev->flags &= ~IFF_UP; + } + + dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); + ne6xvf_disable_vf(adapter); +} + +/** + * ne6xvf_init_process_extended_caps - Part of driver startup + * @adapter: board private structure + * + * Function processes __NE6XVF_INIT_EXTENDED_CAPS driver state. This state + * handles negotiating capabilities for features which require an additional + * message. + * + * Once all extended capabilities exchanges are finished, the driver will + * transition into __NE6XVF_INIT_CONFIG_ADAPTER. 
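+ * No extra virtchnl exchanges are implemented for this device; the function
+ * only copies the negotiated queue pair count and clears the feature word
+ * before changing state.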
+ */ +static void ne6xvf_init_process_extended_caps(struct ne6xvf_adapter *adapter) +{ + WARN_ON(adapter->state != __NE6XVF_INIT_EXTENDED_CAPS); + + /* When we reach here, no further extended capabilities exchanges are + * necessary, so we finally transition into __NE6XVF_INIT_CONFIG_ADAPTER + */ + adapter->vsi_res->num_queue_pairs = adapter->vf_res->num_queue_pairs; + adapter->hw_feature = 0x00; + ne6xvf_change_state(adapter, __NE6XVF_INIT_CONFIG_ADAPTER); +} + +/** + * ne6xvf_process_config - Process the config information we got from the PF + * @adapter: board private structure + * + * Verify that we have a valid config struct, and set up our netdev features + * and our VSI struct. + **/ +int ne6xvf_process_config(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + netdev_features_t csumo_features; + netdev_features_t vlano_features; + netdev_features_t dflt_features; + netdev_features_t tso_features; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_RXHASH; + + csumo_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */ + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_LRO | + NETIF_F_LOOPBACK | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_L4 | + NETIF_F_GSO_SCTP | + 0; + + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + + /* set features that user can change */ + netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = NETIF_F_HW_CSUM; + + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* enable features */ + netdev->features |= netdev->hw_features; + /* encap and VLAN devices inherit default, csumo and tso features */ + netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; + netdev->vlan_features |= dflt_features | csumo_features | tso_features; + netdev->hw_features |= NETIF_F_HW_TC; + + /* advertise support but don't enable by default since only one type of + * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one + * type turns on the other has to be turned off. This is enforced by the + * nce_fix_features() ndo callback. + */ + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_FILTER; + + netdev->gso_max_size = 65535; + netdev->features = netdev->hw_features; + ne6xvf_sync_features(netdev); + + return 0; +} + +/** + * ne6xvf_init_config_adapter - last part of driver startup + * @adapter: board private structure + * + * After all the supported capabilities are negotiated, then the + * __NE6XVF_INIT_CONFIG_ADAPTER state will finish driver initialization. 
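+ * This includes installing the netdev ops, validating (or randomizing) the
+ * MAC address, sizing the descriptor rings, setting up the interrupt scheme
+ * and registering the netdev.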
+ */ +static void ne6xvf_init_config_adapter(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int ret; + + WARN_ON(adapter->state != __NE6XVF_INIT_CONFIG_ADAPTER); + + if (ne6xvf_process_config(adapter)) + goto err; + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + adapter->flags |= NE6XVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &ne6xvf_netdev_ops; + ne6xvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - ETH_HLEN - ETH_FCS_LEN; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + eth_hw_addr_set(netdev, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + adapter->tx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->rx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->cq_desc_count = adapter->tx_desc_count + adapter->rx_desc_count; + ret = ne6xvf_init_interrupt_scheme(adapter); + if (ret) + goto err_sw_init; + + ne6xvf_map_rings_to_vectors(adapter); + + netif_carrier_off(netdev); + adapter->link_up = false; + if (!adapter->netdev_registered) { + ret = ne6xvf_register_netdev(adapter); + if (ret) + goto err_register; + } + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + wake_up(&adapter->down_waitqueue); + ne6xvf_init_rss(adapter); + adapter->trusted = 0; + return; + +err_register: +err_sw_init: + ne6xvf_reset_interrupt_capability(adapter); +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_process_aq_command - process aq_required flags + * and sends aq command + * @adapter: pointer to ne6xvf adapter structure + * + * Returns 0 on success + * Returns error code if no command was sent + * or error code if the command failed. 
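+ * At most one pending request is issued per call; the watchdog task keeps
+ * rescheduling until aq_required has been fully drained.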
+ **/ +static int ne6xvf_process_aq_command(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_CONFIG) + return ne6xvf_send_vf_config_msg(adapter, false); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD) + return ne6xvf_send_vf_offload_msg(adapter); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_RSS) { + ne6xvf_config_rss_info(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CHANGED_RSS) { + ne6xvf_changed_rss(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_QUEUES) { + if (ne6xvf_request_queues(adapter, adapter->num_active_queues) == 0) { + usleep_range(50, 100); + if (ne6xvf_poll_virtchnl_msg(adapter, &event, + VIRTCHNL_OP_REQUEST_QUEUES) == 0) { + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + } + } + return 0; + } + if (adapter->aq_required & NE6XVF_FLAG_AQ_ENABLE_QUEUES) { + ne6xvf_enable_queues(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS) { + ne6xvf_vchanel_get_port_link_status(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_SET_VF_MAC) { + ne6xvf_set_vf_addr(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_MAC_FILTER) { + ne6xvf_add_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_DEL_MAC_FILTER) { + ne6xvf_del_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_VLAN_FILTER) { + ne6xvf_add_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_DEL_VLAN_FILTER) { + ne6xvf_del_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_REQUEST_PROMISC) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_REQUEST_PROMISC; + ne6xvf_set_promiscuous(adapter); + + return 0; + } + return -EAGAIN; +} + +/** + * ne6xvf_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. 
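+ * Note: this implementation currently reports completion unconditionally.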
+ **/ +bool ne6xvf_asq_done(struct ne6xvf_hw *hw) +{ + return 1; +} + +/** + * ne6xvf_register_netdev - register netdev + * @adapter: pointer to the ne6xvf_adapter struct + * + * Returns 0 if register netdev success + **/ +int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter) +{ + char newname[IFNAMSIZ] = {0}; + int ret; + u16 domain_num; + + domain_num = pci_domain_nr(adapter->pdev->bus); + + /* There are some pcie device with the same bus number but with different + * pcie domain, the name of netdev should contain pcie domain number + */ + if (domain_num) + sprintf(newname, "enP%dp%ds0f%dv%d", domain_num, adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport, + adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf); + else + sprintf(newname, "enp%ds0f%dv%d", adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport, + adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf); + + strcpy(&adapter->netdev->name[0], newname); + dev_info(&adapter->pdev->dev, "name: %s\n", newname); + ret = register_netdev(adapter->netdev); + if (ret) { + sprintf(newname, "enp%ds0f%dv%%d", adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport); + strcpy(&adapter->netdev->name[0], newname); + ret = register_netdev(adapter->netdev); + } + return ret; +} + +static void ne6xvf_watchdog_task(struct work_struct *work) +{ + struct ne6xvf_adapter *adapter = container_of(work, struct ne6xvf_adapter, + watchdog_task.work); + struct ne6xvf_hw *hw = &adapter->hw; + + if (ne6xvf_is_remove_in_progress(adapter)) + return; + + if (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + ne6xvf_change_state(adapter, __NE6XVF_COMM_FAILED); + + if (adapter->flags & NE6XVF_FLAG_RESET_NEEDED && adapter->state != __NE6XVF_RESETTING) { + adapter->flags &= ~NE6XVF_FLAG_RESET_NEEDED; + ne6xvf_change_state(adapter, __NE6XVF_RESETTING); + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + } + switch (adapter->state) { + case __NE6XVF_INIT_FAILED: + /* Try again from failed step */ + ne6xvf_change_state(adapter, adapter->last_state); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, HZ); + return; + case __NE6XVF_COMM_FAILED: + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); + return; + case __NE6XVF_RESETTING: + ne6xvf_handle_reset(adapter); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_work(ne6xvf_wq, &adapter->watchdog_task.work); + return; + case __NE6XVF_DOWN: + case __NE6XVF_DOWN_PENDING: + case __NE6XVF_TESTING: + case __NE6XVF_RUNNING: + if (adapter->current_op) { + if (!ne6xvf_asq_done(hw)) { + dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); + ne6xvf_send_api_ver(adapter); + } + } else { + int ret = ne6xvf_process_aq_command(adapter); + + /* An error will be returned if no commands were + * processed; use this opportunity to update stats + * if the error isn't -EOPNOTSUPP + */ + if (ret && ret != -EOPNOTSUPP && adapter->state == __NE6XVF_RUNNING) + ne6xvf_request_stats(adapter); + } + break; + case __NE6XVF_REMOVE: + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + return; + default: + break; + } + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + +restart_watchdog: + queue_work(ne6xvf_wq, 
&adapter->sdk_task); + if (adapter->aq_required) + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); + else + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(1000)); +} + +inline void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_init((struct mutex *)sp); +} + +void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_lock((struct mutex *)sp); +} + +void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_unlock((struct mutex *)sp); +} + +void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_destroy((struct mutex *)sp); +} + +/** + * ne6xvf_find_filter - Search filter list for specific mac filter + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. + **/ +static struct ne6xvf_mac_filter *ne6xvf_find_filter(struct ne6xvf_adapter *adapter, + const u8 *macaddr) +{ + struct ne6xvf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +/** + * ne6xvf_add_filter - Add a mac filter to the filter list + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct ne6xvf_mac_filter *ne6xvf_add_filter(struct ne6xvf_adapter *adapter, + const u8 *macaddr) +{ + struct ne6xvf_mac_filter *f; + + if (!macaddr) + return NULL; + + f = ne6xvf_find_filter(adapter, macaddr); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return f; + + ether_addr_copy(f->macaddr, macaddr); + + list_add_tail(&f->list, &adapter->mac_filter_list); + f->add = true; + f->add_handled = false; + f->is_new_mac = true; + f->is_primary = false; + adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + } else { + f->remove = false; + } + + return f; +} + +/** + * ne6xvf_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock. + **/ +void ne6xvf_down(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *vlf; + struct ne6xvf_mac_filter *f; + + if (adapter->state <= __NE6XVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + ne6xvf_irq_disable(adapter); + ne6xvf_napi_disable_all(adapter); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* clear the sync flag on all filters */ + __dev_uc_unsync(adapter->netdev, NULL); + __dev_mc_unsync(adapter->netdev, NULL); + + /* remove all MAC filters */ + list_for_each_entry(f, &adapter->mac_filter_list, list) + f->remove = true; + + /* remove all VLAN filters */ + list_for_each_entry(vlf, &adapter->vlan_filter_list, list) + vlf->remove = true; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (!(adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) && + adapter->state != __NE6XVF_RESETTING) { + dev_info(&adapter->pdev->dev, "%s: state->%s\n", __func__, + ne6xvf_state_str(adapter->state)); + /* cancel any current operation */ + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + /* Schedule operations to close down the HW. Don't wait + * here for this to complete. The watchdog is still running + * and it will take care of this. 
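+		 * Only the MAC filter removal is queued here; any queue
+		 * configure or enable requests still pending are cancelled
+		 * just below.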
+ */ + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + + /* In case the queue configure or enable operations are still + * pending from when the interface was opened, make sure + * they're canceled here. + */ + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + } + + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter) +{ + adapter->aq_required |= NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +static void ne6xvf_set_vport_state(struct ne6xvf_adapter *adapter, int tx_state, int rx_state) +{ + if (rx_state) + adapter->hw_feature &= ~NE6X_F_RX_DISABLE; + else + adapter->hw_feature |= NE6X_F_RX_DISABLE; + + if (tx_state) + adapter->hw_feature &= ~NE6X_F_TX_DISABLE; + else + adapter->hw_feature |= NE6X_F_TX_DISABLE; + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +/** + * ne6xvf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog is started, + * and the stack is notified that the interface is ready. + **/ +int ne6xvf_open(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int err; + + netdev_info(netdev, "open !!!\n"); + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) { + dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); + err = -EIO; + goto unlock; + } + + if (adapter->state == __NE6XVF_RUNNING && !test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) { + dev_dbg(&adapter->pdev->dev, "VF is already open.\n"); + err = 0; + goto unlock; + } + + if (adapter->state != __NE6XVF_DOWN) { + err = -EBUSY; + goto unlock; + } + err = ne6xvf_setup_all_tg_resources(adapter); + if (err) + goto err_setup_tg; + + err = ne6xvf_setup_all_cq_resources(adapter); + if (err) + goto err_setup_cq; + + /* allocate transmit descriptors */ + err = ne6xvf_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = ne6xvf_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + /* clear any pending interrupts, may auto mask */ + err = ne6xvf_request_traffic_irqs(adapter, netdev->name); + if (err) + goto err_req_irq; + + ne6xvf_configure(adapter); + + ne6xvf_up_complete(adapter); + + ne6xvf_irq_enable(adapter, true); + + ne6xvf_get_port_link_status(adapter); + + ne6xvf_set_vport_state(adapter, true, true); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + return 0; + +err_req_irq: + ne6xvf_down(adapter); + ne6xvf_free_traffic_irqs(adapter); +err_setup_rx: + ne6xvf_free_all_rx_resources(adapter); +err_setup_tx: + ne6xvf_free_all_tx_resources(adapter); +err_setup_cq: + ne6xvf_free_all_cq_resources(adapter); +err_setup_tg: + ne6xvf_free_all_tg_resources(adapter); + +unlock: + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + return err; +} + +/** + * ne6xvf_close - Disables a network interface + * @netdev: 
network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. All IRQs except vector 0 (reserved for admin queue) + * are freed, along with all transmit and receive resources. + **/ +int ne6xvf_close(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6xvf_hw *hw = &adapter->hw; + int status; + int i; + + netdev_info(netdev, "close !!!\n"); + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + if (adapter->state <= __NE6XVF_DOWN_PENDING) { + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + return 0; + } + + ne6xvf_set_vport_state(adapter, false, false); + ne6xvf_down(adapter); + + for (i = 0; i < adapter->num_msix_vectors; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + usleep_range(100, 120); + + ne6xvf_change_state(adapter, __NE6XVF_DOWN_PENDING); + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + if (adapter->state == __NE6XVF_DOWN_PENDING) + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + /* If we're closing the interface as part of driver removal then don't + * wait. The VF resources will be reinitialized when the hardware is + * reset. + */ + if (ne6xvf_is_remove_in_progress(adapter)) + return 0; + + /* We explicitly don't free resources here because the hardware is + * still active and can DMA into memory. Resources are cleared in + * ne6xvf_virtchnl_completion() after we get confirmation from the PF + * driver that the rings have been stopped. + * + * Also, we wait for state to transition to __NE6XVF_DOWN before + * returning. State change occurs in ne6xvf_virtchnl_completion() after + * VF resources are released (which occurs after PF driver processes and + * responds to admin queue commands). + */ + status = wait_event_timeout(adapter->down_waitqueue, adapter->state == __NE6XVF_DOWN, + msecs_to_jiffies(500)); + if (!status) + netdev_dbg(netdev, "Device resources not yet released\n"); + + return 0; +} + +/** + * ne6xvf_addr_sync - Callback for dev_(mc|uc)_sync to add address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be added. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int ne6xvf_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + if (ne6xvf_add_filter(adapter, addr)) + return 0; + else + return -ENOMEM; +} + +/** + * ne6xvf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address + * @netdev: the netdevice + * @addr: address to add + * + * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call + * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock. + */ +static int ne6xvf_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6xvf_mac_filter *f; + + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. 
Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + f = ne6xvf_find_filter(adapter, addr); + if (f) { + f->remove = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + } + + return 0; +} + +/** + * ne6xvf_promiscuous_mode_changed - check if promiscuous mode bits changed + * @adapter: device specific adapter + */ +bool ne6xvf_promiscuous_mode_changed(struct ne6xvf_adapter *adapter) +{ + return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) & + (IFF_PROMISC | IFF_ALLMULTI); +} + +/** + * ne6xvf_set_rx_mode - NDO callback to set the netdev filters + * @netdev: network interface device structure + **/ +static void ne6xvf_set_rx_mode(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + __dev_uc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync); + __dev_mc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync); + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + if (!adapter->trusted) { + adapter->hw_feature &= ~NE6X_F_PROMISC; + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + return; + } + + if (netdev->flags & IFF_PROMISC) { + adapter->flags |= NE6XVF_FLAG_PROMISC_ON; + adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON; + } else if (netdev->flags & IFF_ALLMULTI) { + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON; + } else { + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + } + + adapter->aq_required |= NE6XVF_FLAG_AQ_REQUEST_PROMISC; +} + +/** + * ne6xvf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the watchdog task. 
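+ * Returns the netdev statistics when a netdev is attached, otherwise the
+ * adapter's locally maintained net_stats copy.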
+ **/
+static struct net_device_stats *ne6xvf_get_stats(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->netdev)
+		return &adapter->netdev->stats;
+	else
+		return &adapter->net_stats;
+}
+
+static void ne6xvf_sync_features(struct net_device *netdev)
+{
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+		adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG;
+	else
+		adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG;
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP;
+	else
+		adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP;
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		adapter->hw_feature |= NE6X_F_TX_VLAN;
+	else
+		adapter->hw_feature &= ~NE6X_F_TX_VLAN;
+
+	if (netdev->features & NETIF_F_HW_VLAN_STAG_RX)
+		adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP;
+	else
+		adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP;
+
+	if (netdev->features & NETIF_F_HW_VLAN_STAG_TX)
+		adapter->hw_feature |= NE6X_F_TX_QINQ;
+	else
+		adapter->hw_feature &= ~NE6X_F_TX_QINQ;
+
+	if (netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+		adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER;
+	else
+		adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER;
+
+	if (netdev->features & NETIF_F_RXCSUM)
+		adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM;
+
+	if (netdev->features & NETIF_F_LRO)
+		adapter->hw_feature |= NE6X_OFFLOAD_LRO;
+
+	if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6))
+		adapter->hw_feature |= NE6X_OFFLOAD_TSO;
+
+	if (netdev->features & NETIF_F_IP_CSUM)
+		adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM;
+
+	if (netdev->features & NETIF_F_RXHASH)
+		adapter->hw_feature |= NE6X_OFFLOAD_RSS;
+
+	if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
+		adapter->hw_feature |= NE6X_OFFLOAD_L2;
+
+	if (netdev->features & NETIF_F_SCTP_CRC)
+		adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM;
+	else
+		adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM;
+
+	dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__,
+		 adapter->hw_feature);
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD;
+}
+
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+				     NETIF_F_HW_VLAN_CTAG_TX | \
+				     NETIF_F_HW_VLAN_STAG_RX | \
+				     NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
+
+#define NETIF_UDP_TNL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+/**
+ * ne6xvf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
+ **/
+static int ne6xvf_set_features(struct net_device *netdev, netdev_features_t features)
+{
+	netdev_features_t changed = features ^ netdev->features;
+	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
+
+	if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
+		if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+			adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG;
+		else
+			adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG;
+	}
+
+	if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) {
+		/* keep cases separate because one ethertype for offloads can be
+		 * disabled at the same time as another is disabled, so check for an
+		 * enabled ethertype first, then check for disabled. 
Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + adapter->hw_feature |= NE6X_F_TX_VLAN; + else + adapter->hw_feature &= ~NE6X_F_TX_VLAN; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + adapter->hw_feature |= NE6X_F_TX_QINQ; + else + adapter->hw_feature &= ~NE6X_F_TX_QINQ; + + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER; + } + + if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) { + if (features & NETIF_F_RXCSUM) + adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (features & NETIF_F_LRO) + adapter->hw_feature |= NE6X_OFFLOAD_LRO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_LRO; + } + + if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->hw_feature |= NE6X_OFFLOAD_TSO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_TSO; + } + + if (changed & NETIF_F_GSO_UDP) { + if (features & NETIF_F_GSO_UDP) + adapter->hw_feature |= NE6X_OFFLOAD_UFO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_UFO; + } + + if (changed & NETIF_F_IP_CSUM) { + if (features & NETIF_F_IP_CSUM) + adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_TXCSUM; + } + + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + adapter->hw_feature |= NE6X_OFFLOAD_RSS; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_RSS; + } + + if (changed & NETIF_F_HW_L2FW_DOFFLOAD) { + if (features & NETIF_F_HW_L2FW_DOFFLOAD) + adapter->hw_feature |= NE6X_OFFLOAD_L2; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_L2; + } + + if (changed & NETIF_F_SCTP_CRC) { + if (features & NETIF_F_SCTP_CRC) + adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM; + } + + dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__, + adapter->hw_feature); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +/** + * nce_fix_features - fix the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() + **/ +static netdev_features_t ne6xvf_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (features & NETIF_VLAN_FILTERING_FEATURES) + features |= NETIF_VLAN_FILTERING_FEATURES; + + return features; +} + +/** + * ne6xvf_replace_primary_mac - Replace current primary address + * @adapter: board private structure + * @new_mac: new MAC address to be applied + * + * Replace current dev_addr and send request to PF for removal of previous + 
* primary MAC address filter and addition of new primary MAC filter. + * Return 0 for success, -ENOMEM for failure. + * + * Do not call this with mac_vlan_list_lock! + **/ +int ne6xvf_replace_primary_mac(struct ne6xvf_adapter *adapter, const u8 *new_mac) +{ + memcpy(adapter->hw.mac.addr, new_mac, 6); + adapter->aq_required |= NE6XVF_FLAG_AQ_SET_VF_MAC; + + /* schedule the watchdog task to immediately process the request */ + queue_work(ne6xvf_wq, &adapter->watchdog_task.work); + return 0; +} + +/** + * ne6xvf_set_mac - NDO callback to set port mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_set_mac(struct net_device *netdev, void *p) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + int ret; + + netdev_info(netdev, "set mac address %pM\n", addr->sa_data); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (is_multicast_ether_addr(addr->sa_data)) { + netdev_err(netdev, "Invalid Ethernet address %pM\n", addr->sa_data); + return -EINVAL; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); + return 0; + } + + ret = ne6xvf_replace_primary_mac(adapter, addr->sa_data); + + if (ret) + return ret; + + ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, + ether_addr_equal(netdev->dev_addr, addr->sa_data), + msecs_to_jiffies(2500)); + + /* If ret < 0 then it means wait was interrupted. + * If ret == 0 then it means we got a timeout. + * else it means we got response for set MAC from PF, + * check if netdev MAC was updated to requested MAC, + * if yes then set MAC succeeded otherwise it failed return -EACCES + */ + netdev_info(netdev, "%s,%pM %pM\n", __func__, addr->sa_data, netdev->dev_addr); + if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) + return -EACCES; + + return 0; +} + +/** + * ne6xvf_do_ioctl - Handle network device specific ioctls + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + * + * Callback to handle the networking device specific ioctls. Used to handle + * the SIOCGHWTSTAMP and SIOCSHWTSTAMP ioctl requests that configure Tx and Rx + * timstamping support. + */ +static int ne6xvf_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + return 0; +} + +/** + * ne6xvf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu; + + if (new_mtu < NE6X_MIN_MTU_SIZE) { + netdev_err(netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > NE6X_MAX_RXBUFFER) { + netdev_err(netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + return 0; +} + +/** + * ne6xvf_find_vlan - Search filter list for specific vlan filter + * @vsi: board private structure + * @vlan: vlan tag + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. 
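+ * A filter matches only when both the VLAN ID and the TPID are equal.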
+ **/ +static struct ne6xvf_vlan_filter *ne6xvf_find_vlan(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid) + return f; + } + + return NULL; +} + +/** + * ne6xvf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +struct ne6xvf_vlan_filter *ne6xvf_add_vlan_list(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f = NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adapter->vlan_filter_list); + f->add = true; + } + +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return f; +} + +/** + * ne6xvf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +void ne6xvf_del_vlan_list(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (f) { + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * ne6xvf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +static struct ne6xvf_vlan_filter *ne6xvf_add_vlan(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f = NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adapter->vlan_filter_list); + f->add = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + } + +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return f; +} + +/** + * ne6xvf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +static void ne6xvf_del_vlan(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (f) { + f->remove = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +static int ne6xvf_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_vf_vlan vlan; + + netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__, + be16_to_cpu(proto), vid); + vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto)); + + if (!vid) + return 0; + + if (!ne6xvf_add_vlan(adapter, vlan)) + return -ENOMEM; + + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +static int ne6xvf_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_vf_vlan vlan; + + netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__, + be16_to_cpu(proto), vid); + vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto)); + + ne6xvf_del_vlan(adapter, 
vlan); + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +/** + *__ne6xvf_setup_tc - configure multiple traffic classes + * @netdev: network interface device structure + * @type_data: tc offload data + * + * This function processes the config information provided by the + * user to configure traffic classes/queue channels and packages the + * information to request the PF to setup traffic classes. + * + * Returns 0 on success. + **/ +static int __ne6xvf_setup_tc(struct net_device *netdev, void *type_data) +{ + return 0; +} + +/** + * ne6xvf_setup_tc - configure multiple traffic classes + * @dev: network interface device structure + * @type: type of offload + * @type_data: tc offload data + * + * This function is the callback to ndo_setup_tc in the + * netdev_ops. + * + * Returns 0 on success + **/ +static int ne6xvf_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + return __ne6xvf_setup_tc(dev, type_data); +} + +/** + * ne6xvf_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @dev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t ne6xvf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +/** + * ne6xvf_fwd_add_macvlan - Configure MACVLAN interface + * @netdev: Main net device to configure + * @vdev: MACVLAN subordinate device + */ +static void *ne6xvf_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(&adapter->pdev->dev, sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + ne6xvf_addr_sync(netdev, mac); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adapter->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +/** + * ne6xvf_fwd_del_macvlan - Delete MACVLAN interface resources + * @netdev: Main net device + * @accel_priv: MACVLAN sub ordinate device + */ +static void ne6xvf_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if 
(!accel_priv)
+		return;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+	ne6xvf_addr_unsync(netdev, mv->mac);
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+	list_del(&mv->list);
+	netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name);
+	devm_kfree(&adapter->pdev->dev, mv);
+}
+
+static const struct net_device_ops ne6xvf_netdev_ops = {
+	.ndo_open = ne6xvf_open,
+	.ndo_stop = ne6xvf_close,
+	.ndo_start_xmit = ne6xvf_lan_xmit_frame,
+	.ndo_get_stats = ne6xvf_get_stats,
+	.ndo_set_rx_mode = ne6xvf_set_rx_mode,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = ne6xvf_set_mac,
+	.ndo_do_ioctl = ne6xvf_do_ioctl,
+	.ndo_change_mtu = ne6xvf_change_mtu,
+	.ndo_tx_timeout = ne6xvf_tx_timeout,
+
+	.ndo_vlan_rx_add_vid = ne6xvf_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = ne6xvf_vlan_rx_kill_vid,
+
+	.ndo_setup_tc = ne6xvf_setup_tc,
+	.ndo_features_check = ne6xvf_features_check,
+
+	.ndo_dfwd_add_station = ne6xvf_fwd_add_macvlan,
+	.ndo_dfwd_del_station = ne6xvf_fwd_del_macvlan,
+
+	.ndo_fix_features = ne6xvf_fix_features,
+	.ndo_set_features = ne6xvf_set_features,
+};
+
+static int ne6xvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct ne6xvf_adapter *adapter = NULL;
+	struct ne6xvf_hw *hw = NULL;
+	struct net_device *netdev;
+	char name[IFNAMSIZ] = {0};
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+	if (err) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_regions(pdev, ne6xvf_driver_name);
+	if (err) {
+		dev_err(pci_dev_to_dev(pdev), "pci_request_regions failed 0x%x\n", err);
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+	sprintf(name, "enp%ds%df%d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+		PCI_FUNC(pdev->devfn));
+
+	netdev = alloc_netdev_mq(sizeof(struct ne6xvf_adapter), name, NET_NAME_USER, ether_setup,
+				 NE6XVF_MAX_REQ_QUEUES);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev_priv(netdev);
+
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	ne6xvf_change_state(adapter, __NE6XVF_STARTUP);
+
+	pci_save_state(pdev);
+
+	hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+	hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
+
+	if (!hw->hw_addr0 || !hw->hw_addr2) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+	hw->bus.device = PCI_SLOT(pdev->devfn);
+	hw->bus.func = PCI_FUNC(pdev->devfn);
+	hw->bus.bus_id = pdev->bus->number;
+
+	ne6xvf_init_spinlock(&hw->mbx.mbx_spinlock);
+	spin_lock_init(&adapter->mac_vlan_list_lock);
+
+	INIT_LIST_HEAD(&adapter->mac_filter_list);
+	INIT_LIST_HEAD(&adapter->vlan_filter_list);
+	INIT_LIST_HEAD(&adapter->macvlan_list);
+
+	INIT_WORK(&adapter->sdk_task, ne6xvf_sdk_task);
+	INIT_DELAYED_WORK(&adapter->watchdog_task, ne6xvf_watchdog_task);
+
+	init_waitqueue_head(&adapter->down_waitqueue);
+ 
init_waitqueue_head(&adapter->vc_waitqueue); + + ne6xvf_startup(adapter); + ne6xvf_init_get_resources(adapter); + adapter->aq_required = 0; + ne6xvf_init_process_extended_caps(adapter); + ne6xvf_init_config_adapter(adapter); + + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + + ne6xvf_dbg_pf_init(adapter); + + hw->debug_mask = 0xffffffff; + return 0; +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * ne6xvf_irq_enable_queues - Enable interrupt for specified queues + * @adapter: board private structure + * @mask: bitmap of queues to enable + **/ +void ne6xvf_irq_enable_queues(struct ne6xvf_adapter *adapter, u32 mask) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_msix_vectors; i++) + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), ~(1ULL << NE6X_VP_CQ_INTSHIFT)); +} + +/** + * ne6xvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + * @flush: boolean value whether to run rd32() + **/ +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush) +{ + ne6xvf_irq_enable_queues(adapter, ~0); +} + +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->tg_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tg_rings[i].desc) { + struct ne6x_ring *tg_ring = &adapter->tg_rings[i]; + /* Zero out the descriptor ring */ + memset(tg_ring->desc, 0, tg_ring->size); + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + if (!tg_ring->netdev) + return; + + dma_free_coherent(tg_ring->dev, tg_ring->size, tg_ring->desc, tg_ring->dma); + tg_ring->desc = NULL; + } +} + +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->cq_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->cq_rings[i].desc) { + struct ne6x_ring *cq_ring = &adapter->cq_rings[i]; + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + if (!cq_ring->netdev) + return; + + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter) +{ + unsigned long bi_size; + int i, idx; + + if (!adapter->tx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tx_rings[i].desc) { + struct ne6x_ring *tx_ring = &adapter->tx_rings[i]; + + /* ring already cleared, nothing to do */ + if (tx_ring->tx_buf) { + /* Free all the Tx ring sk_buffs */ + for (idx = 0; idx < tx_ring->count; idx++) + ne6xvf_unmap_and_free_tx_resource(tx_ring, + &tx_ring->tx_buf[idx]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (tx_ring->netdev) + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + } + + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + kfree(tx_ring->sgl); + } +} + +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter) +{ + unsigned long bi_size; + int i, idx; + + 
if (!adapter->rx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->rx_rings[i].desc) { + struct ne6x_ring *rx_ring = &adapter->rx_rings[i]; + /* ring already cleared, nothing to do */ + if (rx_ring->rx_buf) { + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (idx = 0; idx < rx_ring->count; idx++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[idx]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, + rx_bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, + ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + } + + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + } +} + +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + if (!adapter->msix_entries) + return; + + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +static void ne6xvf_remove(struct pci_dev *pdev) +{ + struct ne6xvf_adapter *adapter = ne6xvf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *vlf, *vlftmp; + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_mac_filter *f, *ftmp; + struct ne6x_macvlan *mv, *mv_tmp; + int i; + + ne6xvf_dbg_pf_exit(adapter); + + set_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section); + cancel_work_sync(&adapter->sdk_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + + if (adapter->netdev_registered) { + /* This will call ne6xvf_close if the device was open previously. + * The Admin Queue and watchdog tasks have already been shut + * down at this point so the driver will rely on + * ne6xvf_request_reset below to disable the queues and handle + * any other Admin Queue-based cleanup normally done as part of + * ne6xvf_close. 
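+		 * Remaining ring, interrupt and filter resources are torn
+		 * down explicitly below.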
+ */ + unregister_netdev(netdev); + adapter->netdev_registered = false; + } + + dev_info(&adapter->pdev->dev, "Removing device\n"); + + /* Shut down all the garbage mashers on the detention level */ + ne6xvf_change_state(adapter, __NE6XVF_REMOVE); + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + + ne6xvf_request_reset(adapter); + + for (i = 0; i < adapter->num_active_queues; i++) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1); + + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + + if (adapter->last_state == __NE6XVF_RESETTING || + (adapter->last_state == __NE6XVF_RUNNING && !(netdev->flags & IFF_UP))) + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + + ne6xvf_destroy_spinlock(&hw->mbx.mbx_spinlock); + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + /* release vsi vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry_safe(mv, mv_tmp, &adapter->macvlan_list, list) + ne6xvf_fwd_del_macvlan(netdev, mv); + + iounmap(hw->hw_addr0); + iounmap(hw->hw_addr2); + pci_release_regions(pdev); + + ne6xvf_free_queues(adapter); + kfree(adapter->vf_res); + adapter->vf_res = NULL; + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +static struct pci_driver ne6xvf_driver = { + .name = ne6xvf_driver_name, + .id_table = ne6xvf_pci_tbl, + .probe = ne6xvf_probe, + .remove = ne6xvf_remove, +}; + +static int __init ne6xvf_init_module(void) +{ + int ret; + + pr_info("navf: %s - version %s\n", ne6xvf_driver_string, ne6xvf_driver_version); + + pr_info("%s\n", ne6xvf_copyright); + + ne6xvf_wq = create_singlethread_workqueue(ne6xvf_driver_name); + if (!ne6xvf_wq) { + pr_err("%s: Failed to create workqueue\n", ne6xvf_driver_name); + return -ENOMEM; + } + + ne6xvf_dbg_init(); + + ret = pci_register_driver(&ne6xvf_driver); + + return ret; +} + +module_init(ne6xvf_init_module); + +/** + * ne6xvf_exit_module - Driver Exit Cleanup Routine + * + * ne6xvf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit ne6xvf_exit_module(void) +{ + pci_unregister_driver(&ne6xvf_driver); + destroy_workqueue(ne6xvf_wq); + ne6xvf_dbg_exit(); +} + +module_exit(ne6xvf_exit_module); diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h new file mode 100644 index 000000000000..600dd9f77366 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6XVF_OSDEP_H +#define _NE6XVF_OSDEP_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +inline void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp); + +#endif /* _NE6XVF_OSDEP_H */ + diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c new file mode 100644 index 000000000000..7ba4a802d5b7 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_txrx.h" + +/** + * ne6xvf_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void ne6xvf_update_enable_itr(struct ne6x_q_vector *q_vector) +{ + struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt; + struct ne6xvf_hw *hw = &adpt->hw; + + if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + struct ne6x_ring *cq_ring = NULL; + + cq_ring = q_vector->cq.ring; + if (cq_ring->next_to_clean != cq_ring->next_to_use) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT), + (1ULL << NE6X_VP_CQ_INTSHIFT)); + wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK), + ~(1ULL << NE6X_VP_CQ_INTSHIFT)); + } +} + +/** + * ne6xvf_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +/** + * ne6xvf_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. 
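+ * The completion queue is serviced first with a fixed internal budget of
+ * 16, then the Tx ring, then the Rx ring with the NAPI budget. If the
+ * current CPU is no longer in the vector's affinity mask, polling stops
+ * early so the interrupt can move to the correct CPU.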
+ * + * Returns the amount of work done + **/ +int ne6xvf_napi_poll(struct napi_struct *napi, int budget) +{ + struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi); + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt; + struct ne6x_ring *ring = NULL; + bool clean_complete = true; + int cq_budget = 16; + int work_done = 0; + int cleaned = 0; + + ring = q_vector->cq.ring; + + if (test_bit(NE6X_ADPT_DOWN, comm->state)) { + napi_complete(napi); + return 0; + } + + cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget); + if (cleaned >= cq_budget) + clean_complete = false; + + ring = q_vector->tx.ring; + if (!ne6x_clean_tx_irq(comm, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + + ring = q_vector->rx.ring; + cleaned = ne6x_clean_rx_irq(ring, budget); + if (cleaned >= budget) + clean_complete = false; + + work_done += cleaned; + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. + */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + ne6xvf_update_enable_itr(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } +tx_only: + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + ne6xvf_update_enable_itr(q_vector); + + return min(work_done, budget - 1); +} + +netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; + struct ne6x_ring *tag_ring = &adapter->tg_rings[skb->queue_mapping]; + struct sk_buff *trailer; + int tailen, nsg; + bool jumbo_frame = true; + + tailen = 4; + + if (skb_put_padto(skb, NE6X_MIN_TX_LEN)) + return NETDEV_TX_OK; + + if (skb->len < NE6X_MAX_DATA_PER_TXD) { + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + netdev_err(netdev, "TX: skb_cow_data() returned %d\n", nsg); + return nsg; + } + + pskb_put(skb, trailer, tailen); + jumbo_frame = false; + } + + if (netdev->gso_max_size < skb->len) + netdev_err(netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len); + + return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h new file mode 100644 index 000000000000..0a10c04862a2 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6XVF_TXRX_H +#define _NE6XVF_TXRX_H + +void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer); +int ne6xvf_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c new file mode 100644 index 000000000000..9d6cb823863c --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c @@ -0,0 +1,1123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_osdep.h" + +int ne6xvf_sdk_send_msg_to_pf(struct ne6xvf_hw *hw, enum virtchnl_ops v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen, + void *cmd_details) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + + ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock); + + mbx_buffer.snap.data[0] = 0; + mbx_buffer.snap.data[1] = 0; + mbx_buffer.snap.data[2] = 0; + mbx_buffer.snap.data[3] = 0; + mbx_buffer.snap.data[4] = 0; + mbx_buffer.snap.data[5] = 0; + + if (msglen) { + if (msglen > NE6XVF_SDK_LARGE_BUF) { + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return NE6XVF_ERR_INVALID_SIZE; + } + + memcpy(mbx_buffer.snap.data, msg, msglen); + } + + mbx_buffer.snap.len = msglen; + mbx_buffer.snap.type = v_opcode; + mbx_buffer.snap.state = v_retval; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x2); + + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + + return 0; +} + +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int err; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + return 0; /* nothing to see here, move along */ + + err = ne6xvf_sdk_send_msg_to_pf(hw, op, VIRTCHNL_STATUS_SUCCESS, msg, len, NULL); + if (err) + dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %d, sdk_err %s\n", + op, err, hw->err_str); + + return err; +} + +/** + * ne6xvf_clean_arq_element + * @hw: pointer to the hw struct + * @e: event info from the receive descriptor, includes any buffers + * @pending: number of events that could be left to process + * + * This function cleans one Admin Receive Queue element and returns + * the contents through e. 
It can also return how many events are + * left to process through 'pending' + **/ +enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e, + u16 *pending) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + enum ne6xvf_status ret_code = 0; + u64 val; + int i; + + ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock); + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + if (val & 0x1) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1); + + if (!(val & 0x2)) { + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return NE6XVF_ERR_NOT_READY; + } + + usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA)); + e->msg_len = min_t(u16, (u16)usnap.snap.len, e->buf_len); + if (e->msg_buf && e->msg_len != 0) { + for (i = 0; i < e->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++) { + e->msg_buf[i] = usnap.snap.data[i]; + e->snap.data[i] = usnap.snap.data[i]; + } + } + + e->snap.type = usnap.snap.type; + e->snap.state = usnap.snap.state; + + if (pending) + *pending = 0; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1); + + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return ret_code; +} + +/** + * ne6xvf_poll_virtchnl_msg - poll for virtchnl msg matching the requested_op + * @adapter: adapter structure + * @event: event to populate on success + * @op_to_poll: requested virtchnl op to poll for + */ +int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event, + enum virtchnl_ops op_to_poll) +{ + struct ne6xvf_arq_event_info rece_event; + struct ne6xvf_hw *hw = &adapter->hw; + enum ne6xvf_status status, v_ret; + enum virtchnl_ops received_op; + int timeout = 50000; + int i; + + rece_event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE; + rece_event.msg_buf = kzalloc(rece_event.buf_len, GFP_KERNEL); + if (!rece_event.msg_buf) + return NE6XVF_ERR_NO_MEMORY; + + while (1) { + /* When the SDK is empty, ne6xvf_clean_arq_element will return + * nonzero and this loop will terminate. 
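+		 * NE6XVF_ERR_NOT_READY is retried with a short sleep for up to
+		 * 'timeout' iterations; replies that do not match op_to_poll are
+		 * dispatched to ne6xvf_virtchnl_completion() instead of being
+		 * dropped.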
+ */ + status = ne6xvf_clean_arq_element(hw, &rece_event, NULL); + if (status) { + if (status == NE6XVF_ERR_NOT_READY && timeout) { + usleep_range(10, 12); + timeout--; + continue; + } + kfree(rece_event.msg_buf); + return status; + } + + received_op = (enum virtchnl_ops)le32_to_cpu(rece_event.snap.type); + v_ret = (enum ne6xvf_status)le32_to_cpu(rece_event.snap.state); + if (op_to_poll == received_op) { + memcpy(&event->snap, &rece_event.snap, + sizeof(struct ne6x_mbx_snap_buffer_data)); + event->msg_len = min(rece_event.msg_len, event->buf_len); + if (event->msg_buf) { + for (i = 0; i < event->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++) + event->msg_buf[i] = rece_event.msg_buf[i]; + } + break; + } + + ne6xvf_virtchnl_completion(adapter, received_op, v_ret, rece_event.msg_buf, + rece_event.msg_len); + } + + kfree(rece_event.msg_buf); + status = (enum ne6xvf_status)le32_to_cpu(event->snap.state); + + return status; +} + +int ne6xvf_request_reset(struct ne6xvf_adapter *adapter) +{ + int status; + + if (!adapter->vf_res) + return 0; + /* Don't check CURRENT_OP - this is always higher priority */ + status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, + &adapter->vf_res->vsi_res[0].default_mac_addr[0], 6); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + return status; +} + +int ne6xvf_send_api_ver(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct ne6xvf_virtchnl_version_info vvi; + + vvi.major = NE6XVF_VIRTCHNL_VERSION_MAJOR; + vvi.minor = NE6XVF_VIRTCHNL_VERSION_MINOR; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); + usleep_range(10, 12); + return ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_VERSION); +} + +/** + * ne6xvf_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void ne6xvf_vf_parse_hw_config(struct ne6xvf_hw *hw, struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + + hw->dev_caps.max_mtu = msg->max_mtu; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) { + ether_addr_copy(hw->mac.perm_addr, vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, vsi_res->default_mac_addr); + } + vsi_res++; + } +} + +/** + * ne6xvf_get_vf_config + * @adapter: private adapter structure + * + * Get VF configuration from PF and populate hw structure. Must be called after + * admin queue is initialized. Busy waits until response is received from PF, + * with maximum timeout. Response from PF is returned in the buffer for further + * processing by the caller. 
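+ * The reply payload is unpacked byte by byte: [0] VF id, [1] logical
+ * port, [2] MAC id, [3] base queue, [4] number of queue pairs (also used
+ * as the vector count) and [5] VFs per PF.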
+ **/ +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_arq_event_info event; + int err; + + event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + err = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_RESOURCES); + + hw->dev_caps.vf_id = event.msg_buf[0]; + hw->dev_caps.chip_id = 0x0; + hw->dev_caps.lport = event.msg_buf[1]; + hw->dev_caps.mac_id = event.msg_buf[2]; + hw->dev_caps.base_queue = event.msg_buf[3]; + hw->dev_caps.num_vf_per_pf = event.msg_buf[5]; + adapter->vf_res->num_vsis = 0x1; + adapter->vf_res->num_queue_pairs = event.msg_buf[4]; + adapter->vf_res->max_vectors = event.msg_buf[4]; + adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV; + + adapter->comm.port_info = hw->dev_caps.lport | (hw->dev_caps.vf_id << 8); + + dev_info(&adapter->pdev->dev, "vf %d Get Resource [ lport: %d, mac_id: %d, base: %d, queue: %d, err = %d]\n", + hw->dev_caps.vf_id, hw->dev_caps.lport, hw->dev_caps.mac_id, + hw->dev_caps.base_queue, adapter->vf_res->num_queue_pairs, err); + + ne6xvf_vf_parse_hw_config(hw, adapter->vf_res); + + return err; +} + +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + struct ne6x_vf_vlan vlan; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + event.buf_len = 0; + event.msg_buf = NULL; + + vlan = NE6X_VF_VLAN(0xfff, ETH_P_8021Q); + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan, sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + + return 0; +} + +/** + * ne6xvf_send_vf_config_msg + * @adapter: adapter structure + * + * Send VF configuration request admin queue message to the PF. The reply + * is not checked in this function. Returns 0 if the message was + * successfully sent, or one of the NE6XVF_ADMIN_QUEUE_ERROR_ statuses if not. 
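+ * On first initialization a locally administered MAC with the 02:31:3a
+ * prefix is generated and passed to the PF; on later calls the current
+ * default MAC from the VF resource data is sent instead.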
+ **/ +int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init) +{ + u8 mac_addr[ETH_ALEN]; + + adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_CONFIG; + if (b_init) { + eth_random_addr(mac_addr); + mac_addr[0] = 0x02; + mac_addr[1] = 0x31; + mac_addr[2] = 0x3a; + } else { + memcpy(mac_addr, adapter->vf_res->vsi_res[0].default_mac_addr, 6); + } + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, mac_addr, 6); + + /* mac addr need get for PF */ + adapter->vf_res->vsi_res[0].default_mac_addr[0] = mac_addr[0]; + adapter->vf_res->vsi_res[0].default_mac_addr[1] = mac_addr[1]; + adapter->vf_res->vsi_res[0].default_mac_addr[2] = mac_addr[2]; + adapter->vf_res->vsi_res[0].default_mac_addr[3] = mac_addr[3]; + adapter->vf_res->vsi_res[0].default_mac_addr[4] = mac_addr[4]; + adapter->vf_res->vsi_res[0].default_mac_addr[5] = mac_addr[5]; + adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV; + + return 0; +} + +int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter) +{ + adapter->current_op = VIRTCHNL_OP_CONFIG_OFFLOAD; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + dev_info(&adapter->pdev->dev, "adapter->hw_feature = 0x%08X\n", adapter->hw_feature); + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_OFFLOAD, (u8 *)&adapter->hw_feature, 4); + + return 0; +} + +void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter) +{ + int count, size = sizeof(struct ne6x_rss_info); + int index, status; + u8 *plut_info = (u8 *)&adapter->rss_info; + struct ne6xvf_arq_event_info event; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot Configure RSS, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_RSS; + + count = (size + NE6XVF_SDK_LARGE_BUF - 1) / NE6XVF_SDK_LARGE_BUF; + + for (index = 0; index < count; index++) { + event.buf_len = 0; + event.msg_buf = NULL; + status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS, + &plut_info[index * NE6XVF_SDK_LARGE_BUF], + ((size - index * NE6XVF_SDK_LARGE_BUF) > + NE6XVF_SDK_LARGE_BUF) + ? 
NE6XVF_SDK_LARGE_BUF + : (size - index * NE6XVF_SDK_LARGE_BUF)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CONFIG_RSS); + } + + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_RSS; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot Configure RSS, command %d pending\n", + adapter->current_op); + return; + } + + event.msg_buf = NULL; + event.buf_len = 0; + + adapter->current_op = VIRTCHNL_OP_CHANGED_RSS; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CHANGED_RSS, (u8 *)&adapter->num_active_queues, + sizeof(adapter->num_active_queues)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CHANGED_RSS); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CHANGED_RSS; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +int ne6xvf_request_feature(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot request feature, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + adapter->current_op = VIRTCHNL_OP_GET_VF_FEATURE; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_FEATURE, NULL, 0); + + return 0; +} + +/** + * ne6xvf_request_stats + * @adapter: adapter structure + * + * Request VSI statistics from PF. + **/ +void ne6xvf_request_stats(struct ne6xvf_adapter *adapter) +{ + ne6xvf_update_pf_stats(adapter); +} + +/** + * ne6xvf_request_queues + * @adapter: adapter structure + * @num: number of requested queues + * + * We get a default number of queues from the PF. This enables us to request a + * different number. Returns 0 on success, negative on failure + **/ +int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num) +{ + struct ne6xvf_virtchnl_vf_res_request vfres; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + vfres.num_queue_pairs = 1; + vfres.need_reset = 0x0; + + adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; + adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED; + + return ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, (u8 *)&vfres, sizeof(vfres)); +} + +/** + * ne6xvf_enable_queues + * @adapter: adapter structure + * + * We get a default number of queues from the PF. This enables us to request a + * different number. 
Returns 0 on success, negative on failure + **/ +int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, NULL, 0); + return 0; +} + +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + int status; + + event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + status = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_FEATURE); + if (status == 0) { + adapter->hw_feature = event.snap.data[3]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[2]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[1]; + adapter->hw_feature = (adapter->hw_feature << 8); + adapter->hw_feature |= event.snap.data[0]; + dev_info(&adapter->pdev->dev, "vf %d get feature 0x%08X\n", + adapter->hw.dev_caps.vf_id, adapter->hw_feature); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE; + kfree(event.msg_buf); + + return status; +} + +/** + * ne6xvf_add_ether_addrs + * @adapter: adapter structure + * + * Request that the PF add one or more addresses to our filters. + **/ +void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct virtchnl_ether_addr_list *veal; + struct ne6xvf_mac_filter *f; + int len, i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + len = struct_size(veal, list, count); + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->add) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + f->add = false; + if (i == count) + break; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal->list[i].addr, 6); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_ETH_ADDR); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(veal); +} + +void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter) +{ + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_SET_VF_ADDR; + 
ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_SET_VF_ADDR, adapter->hw.mac.addr, 6); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_SET_VF_MAC; +} + +/** + * ne6xvf_del_ether_addrs + * @adapter: adapter structure + * + * Request that the PF add one or more addresses to our filters. + **/ +void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + struct virtchnl_ether_addr_list *veal; + struct ne6xvf_mac_filter *f, *temp; + int len, i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n", + adapter->current_op); + return; + } + + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (f->remove) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; + + len = struct_size(veal, list, count); + veal = kzalloc(len, GFP_ATOMIC); + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + veal->vsi_id = adapter->vsi_res->vsi_id; + veal->num_elements = count; + list_for_each_entry_safe(f, temp, &adapter->mac_filter_list, list) { + if (f->remove) { + ether_addr_copy(veal->list[i].addr, f->macaddr); + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal->list[i].addr, 6); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_ETH_ADDR); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + kfree(veal); +} + +#define NE6XVF_MAX_SPEED_STRLEN 13 + +/** + * ne6xvf_print_link_message - print link up or down + * @adapter: adapter structure + * + * Log a message telling the world of our wonderous link status + */ +static void ne6xvf_print_link_message(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int link_speed_mbps; + char *speed; + + if (!adapter->link_up) { + netdev_info(netdev, "NIC Link is Down\n"); + return; + } + + speed = kcalloc(1, NE6XVF_MAX_SPEED_STRLEN, GFP_KERNEL); + if (!speed) + return; + + switch (adapter->link_speed) { + case NE6X_LINK_SPEED_100GB: + link_speed_mbps = SPEED_100000; + break; + case NE6X_LINK_SPEED_40GB: + link_speed_mbps = SPEED_40000; + break; + case NE6X_LINK_SPEED_25GB: + link_speed_mbps = SPEED_25000; + break; + case NE6X_LINK_SPEED_10GB: + link_speed_mbps = SPEED_10000; + break; + default: + link_speed_mbps = SPEED_UNKNOWN; + break; + } + + snprintf(speed, NE6XVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps / 1000, "Gbps"); + + netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); + + kfree(speed); +} + +/** + * ne6xvf_set_promiscuous + * @adapter: adapter structure + * @flags: bitmask to control unicast/multicast promiscuous. + * + * Request that the PF enable promiscuous mode for our VSI. 
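+ * The unicast and multicast promiscuous bits are derived from
+ * NE6XVF_FLAG_PROMISC_ON and NE6XVF_FLAG_ALLMULTI_ON in adapter->flags
+ * and mirrored into adapter->hw_feature before the message is sent.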
+ **/ +void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_virtchnl_promisc_info vpi; + int flags = 0; + + dev_warn(&adapter->pdev->dev, "%s: ....\n", __func__); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + + if (adapter->flags & NE6XVF_FLAG_PROMISC_ON) { + adapter->hw_feature |= NE6X_F_PROMISC; + flags |= FLAG_VF_UNICAST_PROMISC; + } else { + adapter->hw_feature &= ~NE6X_F_PROMISC; + } + + if (adapter->flags & NE6XVF_FLAG_ALLMULTI_ON) { + adapter->hw_feature |= NE6X_F_RX_ALLMULTI; + flags |= FLAG_VF_MULTICAST_PROMISC; + } else { + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + } + + vpi.vsi_id = adapter->vsi_res->vsi_id; + vpi.flags = flags; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, (u8 *)&vpi, sizeof(vpi)); +} + +void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + u8 msg[8] = {0}; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot get_link_status, command %d pending\n", + adapter->current_op); + return; + } + + /* pass queue info to vf */ + msg[0] = hw->dev_caps.base_queue; + msg[1] = adapter->num_active_queues; + + adapter->current_op = VIRTCHNL_OP_GET_PORT_STATUS; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_PORT_STATUS, msg, 2); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS; +} + +/** + * ne6xvf_virtchnl_completion + * @adapter: adapter structure + * @v_opcode: opcode sent by PF + * @v_retval: retval sent by PF + * @msg: message sent by PF + * @msglen: message length + * + * Asynchronous completion function for admin queue messages. Rather than busy + * wait, we fire off our requests and assume that no errors will be returned. + * This function handles the reply messages. + **/ +void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode, + enum ne6xvf_status v_retval, u8 *msg, u16 msglen) +{ + struct net_device *netdev = adapter->netdev; + + if (v_opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; + bool link_up = vpe->link_status; + enum ne6x_sdk_link_speed old_link_speed = adapter->link_speed; + + switch (vpe->event) { + case NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE: + adapter->link_speed = (vpe->link_speed_0 << 24) | + (vpe->link_speed_1 << 16) | + (vpe->link_speed_2 << 8) | + vpe->link_speed_3; + if (adapter->current_op == VIRTCHNL_OP_GET_PORT_STATUS) + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + /* we've already got the right link status, bail */ + if (adapter->link_up == link_up) { + if (link_up && old_link_speed != adapter->link_speed) + ne6xvf_print_link_message(adapter); + + break; + } + + if (link_up) { + /* If we get link up message and start queues + * before our queues are configured it will + * trigger a TX hang. In that case, just ignore + * the link status message,we'll get another one + * after we enable queues and actually prepared + * to send traffic. + */ + if (adapter->state != __NE6XVF_RUNNING) + break; + + /* For ADQ enabled VF, we reconfigure VSIs and + * re-allocate queues. Hence wait till all + * queues are enabled. 
+ */ + if (adapter->flags & NE6XVF_FLAG_QUEUES_DISABLED) + break; + } + + adapter->link_up = link_up; + if (link_up) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + ne6xvf_print_link_message(adapter); + break; + case NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING: + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); + break; + default: + dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", vpe->event); + break; + } + return; + } + + if (v_opcode == VIRTCHNL_OP_VF_CONFIG) { + struct virtchnl_vf_config *vfconfig = (struct virtchnl_vf_config *)msg; + + dev_info(&adapter->pdev->dev, "vf_vonfig_data from the PF,type= %d,value = %d\n", + vfconfig->type, vfconfig->data[0]); + switch (vfconfig->type) { + case VIRTCHNL_VF_CONFIG_TRUST: + adapter->trusted = vfconfig->data[0]; + if (!adapter->trusted) { + adapter->hw_feature &= ~NE6X_F_PROMISC; + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + } + break; + default: + break; + } + return; + } + + if (v_retval) { + switch (v_opcode) { + case VIRTCHNL_OP_SET_VF_ADDR: + dev_err(&adapter->pdev->dev, "Failed to change MAC address\n"); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + wake_up(&adapter->vc_waitqueue); + if (adapter->current_op != VIRTCHNL_OP_SET_VF_ADDR) + return; + + break; + default: + dev_err(&adapter->pdev->dev, "PF returned error %d to our request %d\n", + v_retval, v_opcode); + + /* Assume that the ADQ configuration caused one of the + * v_opcodes in this if statement to fail. Set the + * flag so the reset path can return to the pre-ADQ + * configuration and traffic can resume + */ + if ((v_opcode == VIRTCHNL_OP_ENABLE_QUEUES || + v_opcode == VIRTCHNL_OP_CONFIG_IRQ_MAP || + v_opcode == VIRTCHNL_OP_CONFIG_ADPT_QUEUES)) { + dev_err(&adapter->pdev->dev, + "ADQ is enabled and opcode %d failed (%d)\n", v_opcode, + v_retval); + netdev_reset_tc(netdev); + adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED; + ne6xvf_schedule_reset(adapter); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + return; + } + } + } + + switch (v_opcode) { + case VIRTCHNL_OP_SET_VF_ADDR: + if (!v_retval) { + if (msglen != 0 && msg) { + netif_addr_lock_bh(netdev); + ether_addr_copy(adapter->hw.mac.addr, msg); + eth_hw_addr_set(netdev, msg); + netif_addr_unlock_bh(netdev); + } + } + wake_up(&adapter->vc_waitqueue); + if (adapter->current_op == VIRTCHNL_OP_SET_VF_ADDR) + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + memcpy(adapter->vf_res, msg, msglen); + ne6xvf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + netif_addr_lock_bh(netdev); + /* refresh current mac address if changed */ + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + netif_addr_unlock_bh(netdev); + } + + ne6xvf_parse_vf_resource_msg(adapter); + break; + case VIRTCHNL_OP_GET_VF_FEATURE: + memcpy(&adapter->hw_feature, msg, 4); + dev_info(&adapter->pdev->dev, "%s: hw_featrue = 0x%08X\n", + ne6xvf_state_str(adapter->state), adapter->hw_feature); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + /* enable transmits */ + if (adapter->state == __NE6XVF_RUNNING) { + ne6xvf_irq_enable(adapter, true); + /* If queues not enabled when handling link event, + * then set carrier on now + */ + if 
(adapter->link_up && !netif_carrier_ok(netdev)) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } + } + adapter->flags |= NE6XVF_FLAG_QUEUES_ENABLED; + adapter->flags &= ~NE6XVF_FLAG_QUEUES_DISABLED; + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + if (adapter->state == __NE6XVF_DOWN_PENDING) + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + + adapter->flags &= ~NE6XVF_FLAG_QUEUES_ENABLED; + break; + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + /* Don't display an error if we get these out of sequence. + * If the firmware needed to get kicked, we'll get these and + * it's no problem. + */ + if (v_opcode != adapter->current_op) + return; + + break; + case VIRTCHNL_OP_REQUEST_QUEUES: { + struct ne6xvf_virtchnl_vf_res_request *vfres = + (struct ne6xvf_virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { + dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", + adapter->num_req_queues, vfres->num_queue_pairs); + adapter->num_req_queues = 0; + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + } + } break; + default: + if (adapter->current_op && v_opcode != adapter->current_op) + dev_dbg(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); + + break; + } /* switch v_opcode */ + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +/** + * ne6xvf_add_vlans + * @adapter: adapter structure + * + * Request that the PF add one or more VLAN filters to our VSI. + **/ +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f = NULL; + struct ne6x_vf_vlan *vlan = NULL; + int len = 0, i = 0, count = 0; + + dev_info(&adapter->pdev->dev, "%s: adapter->current_op:%d\n", __func__, + adapter->current_op); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(struct ne6x_vf_vlan) * count; + vlan = kzalloc(len, GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + f->add = false; + f->is_new_vlan = true; + if (i == count) + break; + } + } + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} + +/** + * ne6xvf_del_vlans + * @adapter: adapter structure + * + * Request that the PF remove one or more VLAN filters from our VSI. 
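+ * Filters marked for removal are unlinked and freed under the
+ * mac_vlan_list_lock, then one VIRTCHNL_OP_DEL_VLAN message carrying the
+ * TPID and VLAN id is sent and polled for each removed filter.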
+ **/ +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f, *ftmp; + struct ne6x_vf_vlan *vlan = NULL; + int i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we dont want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + vlan = kcalloc(count, sizeof(*vlan), GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h new file mode 100644 index 000000000000..1fae0b1922dc --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_VIRTCHNL_H +#define _NE6XVF_VIRTCHNL_H + +#define NE6XVF_SDK_LARGE_BUF 6 + +struct ne6xvf_spinlock { + /* mutext lock */ + struct mutex spinlock; +}; + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + + /* see enum virtchnl_vsi_type */ + s32 vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 type; + u8 pad; +}; + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[]; +}; + +enum nacf_virtchnl_vsi_type { + NE6XVF_VIRTCHNL_VSI_TYPE_INVALID = 0, + NE6XVF_VIRTCHNL_VSI_SRIOV = 6, +}; + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[]; +}; + +struct ne6xvf_arq_event_info { + struct ne6x_mbx_snap_buffer_data snap; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* VF resource request */ +struct ne6xvf_virtchnl_vf_res_request { + u16 num_queue_pairs; + u8 need_reset; + u8 rsv; +}; + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. 
+ * Note: we assume that broadcast accept mode is always enabled. + */ +struct ne6xvf_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +union u_ne6x_mbx_snap_buffer_data { + struct ne6x_mbx_snap_buffer_data snap; + u64 val; +}; + +struct ne6xvf_sdk_mbx_info { + struct ne6xvf_spinlock mbx_spinlock; + struct ne6x_mbx_snap_buffer_data sq_data; + struct ne6x_mbx_snap_buffer_data cq_data; + int init_flag; +}; + +#define NE6XVF_VIRTCHNL_VERSION_MAJOR 1 +#define NE6XVF_VIRTCHNL_VERSION_MINOR 1 + +struct ne6xvf_virtchnl_version_info { + u8 major; + u8 minor; +}; + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum ne6xvf_virtchnl_event_codes { + NE6XVF_VIRTCHNL_EVENT_UNKNOWN = 0, + NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE, + NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING, + NE6XVF_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + NE6XVF_VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE, +}; + +struct virtchnl_pf_event { + u8 event; + u8 link_speed_0; + u8 link_speed_1; + u8 link_speed_2; + u8 link_speed_3; + u8 link_status; +}; + +#endif -- Gitee